Commit 6f901b6

Oops, only wanted datetime.c changes in there. lock stuff reversed out.

1 parent 9e7b9c6 · commit 6f901b6
File tree: 1 file changed (+58 −63 lines)


src/backend/storage/lmgr/lwlock.c

Lines changed: 58 additions & 63 deletions
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.6 2001/12/29 21:28:18 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.7 2001/12/29 21:30:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -195,8 +195,7 @@ void
 LWLockAcquire(LWLockId lockid, LWLockMode mode)
 {
     volatile LWLock *lock = LWLockArray + lockid;
-    PROC       *proc = MyProc;
-    int         extraWaits = 0;
+    bool        mustwait;
 
     PRINT_LWDEBUG("LWLockAcquire", lockid, lock);
 
@@ -207,57 +206,43 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
      */
     HOLD_INTERRUPTS();
 
-    /*
-     * Loop here to try to acquire lock after each time we are signaled
-     * by LWLockRelease.
-     *
-     * NOTE: it might seem better to have LWLockRelease actually grant us
-     * the lock, rather than retrying and possibly having to go back to
-     * sleep.  But in practice that is no good because it means a process
-     * swap for every lock acquisition when two or more processes are
-     * contending for the same lock.  Since LWLocks are normally used to
-     * protect not-very-long sections of computation, a process needs to
-     * be able to acquire and release the same lock many times during a
-     * single process dispatch cycle, even in the presence of contention.
-     * The efficiency of being able to do that outweighs the inefficiency of
-     * sometimes wasting a dispatch cycle because the lock is not free when a
-     * released waiter gets to run.  See pgsql-hackers archives for 29-Dec-01.
-     */
-    for (;;)
-    {
-        bool        mustwait;
-
-        /* Acquire mutex.  Time spent holding mutex should be short! */
-        SpinLockAcquire_NoHoldoff(&lock->mutex);
+    /* Acquire mutex.  Time spent holding mutex should be short! */
+    SpinLockAcquire_NoHoldoff(&lock->mutex);
 
-        /* If I can get the lock, do so quickly. */
-        if (mode == LW_EXCLUSIVE)
+    /* If I can get the lock, do so quickly. */
+    if (mode == LW_EXCLUSIVE)
+    {
+        if (lock->exclusive == 0 && lock->shared == 0)
         {
-            if (lock->exclusive == 0 && lock->shared == 0)
-            {
-                lock->exclusive++;
-                mustwait = false;
-            }
-            else
-                mustwait = true;
+            lock->exclusive++;
+            mustwait = false;
         }
         else
+            mustwait = true;
+    }
+    else
+    {
+        /*
+         * If there is someone waiting (presumably for exclusive access),
+         * queue up behind him even though I could get the lock.  This
+         * prevents a stream of read locks from starving a writer.
+         */
+        if (lock->exclusive == 0 && lock->head == NULL)
         {
-            if (lock->exclusive == 0)
-            {
-                lock->shared++;
-                mustwait = false;
-            }
-            else
-                mustwait = true;
+            lock->shared++;
+            mustwait = false;
         }
+        else
+            mustwait = true;
+    }
 
-        if (!mustwait)
-            break;              /* got the lock */
+    if (mustwait)
+    {
+        /* Add myself to wait queue */
+        PROC       *proc = MyProc;
+        int         extraWaits = 0;
 
         /*
-         * Add myself to wait queue.
-         *
          * If we don't have a PROC structure, there's no way to wait. This
          * should never occur, since MyProc should only be null during
         * shared memory initialization.
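
The hunk above backs LWLockAcquire out of the retry-loop design (whose rationale the removed NOTE comment spells out) and restores the direct-grant protocol: the releasing process updates lock->exclusive or lock->shared on behalf of the waiters it wakes, so an awakened waiter owns the lock the moment it returns from its semaphore wait and never loops back to recontend. A minimal sketch of that acquire path, using a pthread mutex and POSIX semaphores in place of PostgreSQL's spinlock and IpcSemaphore primitives; ToyLWLock, Waiter, and the function names are illustrative inventions, not PostgreSQL's:

/* toy_lwlock.c -- illustrative sketch only, not PostgreSQL code */
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct Waiter
{
    sem_t          wakeup;      /* each waiter sleeps on its own semaphore */
    bool           exclusive;   /* wants exclusive access? */
    struct Waiter *next;        /* FIFO wait-queue link */
} Waiter;

typedef struct ToyLWLock
{
    pthread_mutex_t mutex;      /* stands in for lock->mutex (a spinlock) */
    int            exclusive;   /* # of exclusive holders (0 or 1) */
    int            shared;      /* # of shared holders */
    Waiter        *head;        /* wait-queue head */
    Waiter        *tail;        /* wait-queue tail */
} ToyLWLock;

/*
 * Acquire in exclusive mode, direct-grant style: there is no retry
 * loop.  If we must wait, the releaser sets lock->exclusive for us
 * before posting our semaphore, so returning from sem_wait() means
 * we already own the lock.
 */
static void
toy_acquire_exclusive(ToyLWLock *lock, Waiter *self)
{
    bool        mustwait;

    pthread_mutex_lock(&lock->mutex);
    if (lock->exclusive == 0 && lock->shared == 0)
    {
        lock->exclusive++;
        mustwait = false;
    }
    else
    {
        /* append self to the FIFO wait queue */
        self->exclusive = true;
        self->next = NULL;
        if (lock->tail)
            lock->tail->next = self;
        else
            lock->head = self;
        lock->tail = self;
        mustwait = true;
    }
    pthread_mutex_unlock(&lock->mutex);

    if (mustwait)
        sem_wait(&self->wakeup);    /* lock is already ours on wakeup */
}

The removed NOTE explains the cost this protocol pays under contention: every grant implies a process switch, since only the awakened waiter can make progress. That is presumably what the retry-loop version was meant to fix; this commit only reverses it because, per the commit message, it was committed by accident alongside datetime.c changes.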
@@ -282,9 +267,9 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
          *
          * Since we share the process wait semaphore with the regular lock
          * manager and ProcWaitForSignal, and we may need to acquire an
-         * LWLock while one of those is pending, it is possible that we get
-         * awakened for a reason other than being signaled by LWLockRelease.
-         * If so, loop back and wait again.  Once we've gotten the LWLock,
+         * LWLock while one of those is pending, it is possible that we
+         * get awakened for a reason other than being granted the LWLock.
+         * If so, loop back and wait again.  Once we've gotten the lock,
          * re-increment the sema by the number of additional signals
          * received, so that the lock manager or signal manager will see
          * the received signal when it next waits.
@@ -302,21 +287,23 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 
         LOG_LWDEBUG("LWLockAcquire", lockid, "awakened");
 
-        /* Now loop back and try to acquire lock again. */
+        /*
+         * The awakener already updated the lock struct's state, so we
+         * don't need to do anything more to it.  Just need to fix the
+         * semaphore count.
+         */
+        while (extraWaits-- > 0)
+            IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum);
+    }
+    else
+    {
+        /* Got the lock without waiting */
+        SpinLockRelease_NoHoldoff(&lock->mutex);
     }
-
-    /* We are done updating shared state of the lock itself. */
-    SpinLockRelease_NoHoldoff(&lock->mutex);
 
     /* Add lock to list of locks held by this backend */
     Assert(num_held_lwlocks < MAX_SIMUL_LWLOCKS);
     held_lwlocks[num_held_lwlocks++] = lockid;
-
-    /*
-     * Fix the process wait semaphore's count for any absorbed wakeups.
-     */
-    while (extraWaits-- > 0)
-        IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum);
 }
 
 /*
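
The relocated extraWaits logic handles a subtlety called out in the surrounding comment: the per-process wait semaphore is shared with the regular lock manager and ProcWaitForSignal, so a sleeping LWLock waiter can absorb posts meant for those consumers. The waiter counts stray wakeups and repays them once it holds the lock. A sketch of that absorb-and-repay pattern, extending the toy above with a hypothetical granted flag on Waiter standing in for the real code's wait-done test (set by the releaser under the mutex before it posts the semaphore):

/* Inside toy_acquire_exclusive()'s mustwait branch, replacing the
 * single sem_wait(): absorb wakeups that belong to other users of
 * the shared semaphore, then repay them afterwards. */
int         extraWaits = 0;

for (;;)
{
    sem_wait(&self->wakeup);
    if (self->granted)          /* the releaser granted us the lock */
        break;
    extraWaits++;               /* this post was meant for someone else */
}

/* Repay the absorbed posts so their intended consumers still see
 * them the next time they wait on the semaphore. */
while (extraWaits-- > 0)
    sem_post(&self->wakeup);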
@@ -357,7 +344,12 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
     }
     else
     {
-        if (lock->exclusive == 0)
+        /*
+         * If there is someone waiting (presumably for exclusive access),
+         * queue up behind him even though I could get the lock.  This
+         * prevents a stream of read locks from starving a writer.
+         */
+        if (lock->exclusive == 0 && lock->head == NULL)
         {
             lock->shared++;
             mustwait = false;
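
Both LWLockAcquire and LWLockConditionalAcquire get the lock->head == NULL term back in their shared-mode test, together with the starvation comment. The difference between the two predicates, side by side (variable name illustrative):

/* Old test: a reader may join whenever no writer currently holds the
 * lock.  A continuous stream of overlapping readers then keeps
 * lock->shared above zero forever, and a queued writer never runs. */
can_share = (lock->exclusive == 0);

/* Restored test: a reader also defers to anyone already queued
 * (presumably a writer), so the reader stream drains and the writer
 * gets its turn. */
can_share = (lock->exclusive == 0 && lock->head == NULL);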
@@ -435,17 +427,20 @@ LWLockRelease(LWLockId lockid)
     if (lock->exclusive == 0 && lock->shared == 0)
     {
         /*
-         * Remove the to-be-awakened PROCs from the queue.  If the
-         * front waiter wants exclusive lock, awaken him only.
-         * Otherwise awaken as many waiters as want shared access.
+         * Remove the to-be-awakened PROCs from the queue, and update
+         * the lock state to show them as holding the lock.
          */
         proc = head;
-        if (!proc->lwExclusive)
+        if (proc->lwExclusive)
+            lock->exclusive++;
+        else
         {
+            lock->shared++;
             while (proc->lwWaitLink != NULL &&
                    !proc->lwWaitLink->lwExclusive)
             {
                 proc = proc->lwWaitLink;
+                lock->shared++;
             }
         }
         /* proc is now the last PROC to be released */
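
The LWLockRelease hunk is the releaser's half of the direct-grant protocol: once the lock is free, it detaches either the front exclusive waiter alone or the maximal leading run of shared waiters, and bumps the hold counts for them before they ever run. Completing the earlier toy sketch with just that selection logic (same illustrative types; the sem_post() loop and the granted flag update are elided):

/*
 * Called with lock->mutex held and exclusive == shared == 0: detach
 * the waiters to wake and mark them as holders before waking them.
 */
static Waiter *
toy_pick_wakeups(ToyLWLock *lock)
{
    Waiter     *head = lock->head;
    Waiter     *last = head;

    if (head == NULL)
        return NULL;            /* nobody waiting */

    if (head->exclusive)
        lock->exclusive++;      /* wake the front writer only */
    else
    {
        lock->shared++;
        /* extend through the maximal leading run of shared waiters */
        while (last->next != NULL && !last->next->exclusive)
        {
            last = last->next;
            lock->shared++;
        }
    }

    /* unlink the batch head..last from the queue */
    lock->head = last->next;
    if (lock->head == NULL)
        lock->tail = NULL;
    last->next = NULL;

    return head;                /* caller sets granted = true and
                                 * sem_post()s each waiter in the batch */
}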

0 commit comments
