@@ -570,6 +570,56 @@ LWLockInitialize(LWLock *lock, int tranche_id)
 	dlist_init(&lock->waiters);
 }
 
+#if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && (defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__))
+
+/*
+ * Special optimization for PowerPC processors: put the logic dealing with
+ * LWLock state between the lwarx/stwcx operations, so that the whole
+ * attempt is a single load-reserve/store-conditional sequence.
+ */
+static bool
+LWLockAttemptLock(LWLock *lock, LWLockMode mode)
+{
+	uint32		mask,
+				increment;
+	bool		result;
+
+	AssertArg(mode == LW_EXCLUSIVE || mode == LW_SHARED);
+
+	if (mode == LW_EXCLUSIVE)
+	{
+		mask = LW_LOCK_MASK;
+		increment = LW_VAL_EXCLUSIVE;
+	}
+	else
+	{
+		mask = LW_VAL_EXCLUSIVE;
+		increment = LW_VAL_SHARED;
+	}
+
+	__asm__ __volatile__(
+		"0:	lwarx	3,0,%4\n"	/* load lock->state, take reservation */
+		"	and	4,3,%2\n"	/* isolate the conflicting state bits */
+		"	cmpwi	4,0\n"		/* any of them set? */
+		"	bne-	1f\n"		/* yes: lock is busy, report failure */
+		"	add	3,3,%3\n"	/* no: add our shared/exclusive marker */
+		"	stwcx.	3,0,%4\n"	/* store back if reservation still held */
+		"	bne-	0b\n"		/* reservation lost, retry */
+		"	li	%0,0\n"		/* acquired: return false */
+		"	b	2f\n"
+		"1:	li	%0,1\n"		/* busy: return true, caller must wait */
+#ifdef USE_PPC_LWSYNC
+		"2:	lwsync\n"		/* acquire barrier */
+#else
+		"2:	isync\n"		/* acquire barrier */
+#endif
+		: "=&r" (result), "+m" (lock->state)
+		: "r" (mask), "r" (increment), "r" (&lock->state)
+		: "memory", "cc", "r3", "r4");
+
+	return result;
+}
+
+#else
 /*
  * Internal function that tries to atomically acquire the lwlock in the passed
  * in mode.
@@ -642,6 +692,8 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 	pg_unreachable();
 }
 
+#endif
+
 /*
  * Wakeup all the lockers that currently have a chance to acquire the lock.
  */
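
For comparison, the generic LWLockAttemptLock kept under the #else branch takes the compare-and-exchange route: read lock->state, decide whether the lock is free, compute the desired new value, and redo that inspection whenever pg_atomic_compare_exchange_u32() fails. On POWER the compare-and-exchange primitive is itself built from an lwarx/stwcx loop, so the generic path effectively nests two retry loops; the assembly above performs the state check directly between lwarx and stwcx, leaving a single one. A rough, simplified sketch of the generic path (debug bookkeeping and comments elided, names illustrative):

/*
 * Sketch of the generic LWLockAttemptLock: a CAS retry loop over
 * lock->state.  Returns true if the lock is busy and the caller must wait.
 */
static bool
LWLockAttemptLock(LWLock *lock, LWLockMode mode)
{
	uint32		old_state;

	AssertArg(mode == LW_EXCLUSIVE || mode == LW_SHARED);

	/* read once; a failed compare-exchange refreshes old_state */
	old_state = pg_atomic_read_u32(&lock->state);

	while (true)
	{
		uint32		desired_state = old_state;
		bool		lock_free;

		if (mode == LW_EXCLUSIVE)
		{
			lock_free = (old_state & LW_LOCK_MASK) == 0;
			if (lock_free)
				desired_state += LW_VAL_EXCLUSIVE;
		}
		else
		{
			lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
			if (lock_free)
				desired_state += LW_VAL_SHARED;
		}

		/*
		 * If the CAS fails, old_state has been refreshed and the state
		 * inspection above is redone from scratch -- this is the outer
		 * loop that the PowerPC version folds into its single
		 * lwarx/stwcx sequence.
		 */
		if (pg_atomic_compare_exchange_u32(&lock->state,
										   &old_state, desired_state))
			return !lock_free;
	}
}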