author    | Kevin Grittner | 2011-11-15 22:02:25 +0000
committer | Kevin Grittner | 2011-11-15 22:02:25 +0000
commit    | df02fedd3f7153b51771e36d63e1825db7083130 (patch)
tree      | 3775c7f13c7a87b04f0e8f51d1b433b69e383a83
parent    | 04da3232907680caad3445928c97a246c626a14a (diff)
Apply v1 patches from Robert Haas.
-rw-r--r-- | src/backend/storage/lmgr/flexlock.c      | 366
-rw-r--r-- | src/backend/storage/lmgr/procarraylock.c | 341
-rw-r--r-- | src/include/storage/flexlock.h           | 102
-rw-r--r-- | src/include/storage/flexlock_internals.h |  89
-rw-r--r-- | src/include/storage/procarraylock.h      |  28
5 files changed, 926 insertions, 0 deletions
diff --git a/src/backend/storage/lmgr/flexlock.c b/src/backend/storage/lmgr/flexlock.c
new file mode 100644
index 0000000000..c88bd24280
--- /dev/null
+++ b/src/backend/storage/lmgr/flexlock.c
@@ -0,0 +1,366 @@
+/*-------------------------------------------------------------------------
+ *
+ * flexlock.c
+ *    Low-level routines for managing flex locks.
+ *
+ * Flex locks are intended primarily to provide mutual exclusion of access
+ * to shared-memory data structures.  Most, but not all, flex locks are
+ * lightweight locks (LWLocks).  This file contains support routines that
+ * are used for all types of flex locks, including LWLocks.  User-level
+ * locking should be done with the full lock manager --- which depends on
+ * LWLocks to protect its shared state.
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *    src/backend/storage/lmgr/flexlock.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "access/clog.h"
+#include "access/multixact.h"
+#include "access/subtrans.h"
+#include "commands/async.h"
+#include "storage/flexlock_internals.h"
+#include "storage/lwlock.h"
+#include "storage/predicate.h"
+#include "storage/proc.h"
+#include "storage/procarraylock.h"
+#include "storage/spin.h"
+#include "utils/elog.h"
+
+/*
+ * We use this structure to keep track of flex locks held, for release
+ * during error recovery.  The maximum size could be determined at runtime
+ * if necessary, but it seems unlikely that more than a few locks could
+ * ever be held simultaneously.
+ */
+#define MAX_SIMUL_FLEXLOCKS  100
+
+int         num_held_flexlocks = 0;
+FlexLockId  held_flexlocks[MAX_SIMUL_FLEXLOCKS];
+
+static int  lock_addin_request = 0;
+static bool lock_addin_request_allowed = true;
+
+#ifdef LOCK_DEBUG
+bool        Trace_flexlocks = false;
+#endif
+
+/*
+ * This points to the array of FlexLocks in shared memory.  Backends inherit
+ * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
+ * where we have special measures to pass it down).
+ */
+FlexLockPadded *FlexLockArray = NULL;
+
+/* We use the ShmemLock spinlock to protect LWLockAssign */
+extern slock_t *ShmemLock;
+
+static void FlexLockInit(FlexLock *flex, char locktype);
+
+/*
+ * Compute number of FlexLocks to allocate.
+ */
+int
+NumFlexLocks(void)
+{
+    int         numLocks;
+
+    /*
+     * Possibly this logic should be spread out among the affected modules,
+     * the same way that shmem space estimation is done.  But for now, there
+     * are few enough users of FlexLocks that we can get away with just
+     * keeping the knowledge here.
+     */
+
+    /* Predefined FlexLocks */
+    numLocks = (int) NumFixedFlexLocks;
+
+    /* bufmgr.c needs two for each shared buffer */
+    numLocks += 2 * NBuffers;
+
+    /* proc.c needs one for each backend or auxiliary process */
+    numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
+
+    /* clog.c needs one per CLOG buffer */
+    numLocks += NUM_CLOG_BUFFERS;
+
+    /* subtrans.c needs one per SubTrans buffer */
+    numLocks += NUM_SUBTRANS_BUFFERS;
+
+    /* multixact.c needs two SLRU areas */
+    numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
+
+    /* async.c needs one per Async buffer */
+    numLocks += NUM_ASYNC_BUFFERS;
+
+    /* predicate.c needs one per old serializable xid buffer */
+    numLocks += NUM_OLDSERXID_BUFFERS;
+
+    /*
+     * Add any requested by loadable modules; for backwards-compatibility
+     * reasons, allocate at least NUM_USER_DEFINED_FLEXLOCKS of them even if
+     * there are no explicit requests.
+     */
+    lock_addin_request_allowed = false;
+    numLocks += Max(lock_addin_request, NUM_USER_DEFINED_FLEXLOCKS);
+
+    return numLocks;
+}
+
+
+/*
+ * RequestAddinFlexLocks
+ *      Request that extra FlexLocks be allocated for use by
+ *      a loadable module.
+ *
+ * This is only useful if called from the _PG_init hook of a library that
+ * is loaded into the postmaster via shared_preload_libraries.  Once
+ * shared memory has been allocated, calls will be ignored.  (We could
+ * raise an error, but it seems better to make it a no-op, so that
+ * libraries containing such calls can be reloaded if needed.)
+ */
+void
+RequestAddinFlexLocks(int n)
+{
+    if (IsUnderPostmaster || !lock_addin_request_allowed)
+        return;                 /* too late */
+    lock_addin_request += n;
+}
+
+
+/*
+ * Compute shmem space needed for FlexLocks.
+ */
+Size
+FlexLockShmemSize(void)
+{
+    Size        size;
+    int         numLocks = NumFlexLocks();
+
+    /* Space for the FlexLock array. */
+    size = mul_size(numLocks, FLEX_LOCK_BYTES);
+
+    /* Space for dynamic allocation counter, plus room for alignment. */
+    size = add_size(size, 2 * sizeof(int) + FLEX_LOCK_BYTES);
+
+    return size;
+}
+
+/*
+ * Allocate shmem space for FlexLocks and initialize the locks.
+ */
+void
+CreateFlexLocks(void)
+{
+    int         numLocks = NumFlexLocks();
+    Size        spaceLocks = FlexLockShmemSize();
+    FlexLockPadded *lock;
+    int        *FlexLockCounter;
+    char       *ptr;
+    int         id;
+
+    /* Allocate and zero space */
+    ptr = (char *) ShmemAlloc(spaceLocks);
+    memset(ptr, 0, spaceLocks);
+
+    /* Leave room for dynamic allocation counter */
+    ptr += 2 * sizeof(int);
+
+    /* Ensure desired alignment of FlexLock array */
+    ptr += FLEX_LOCK_BYTES - ((uintptr_t) ptr) % FLEX_LOCK_BYTES;
+
+    FlexLockArray = (FlexLockPadded *) ptr;
+
+    /* All of the "fixed" FlexLocks are LWLocks - except ProcArrayLock. */
+    for (id = 0, lock = FlexLockArray; id < NumFixedFlexLocks; id++, lock++)
+    {
+        if (id == ProcArrayLock)
+            FlexLockInit(&lock->flex, FLEXLOCK_TYPE_PROCARRAYLOCK);
+        else
+            FlexLockInit(&lock->flex, FLEXLOCK_TYPE_LWLOCK);
+    }
+
+    /*
+     * Initialize the dynamic-allocation counter, which is stored just before
+     * the first FlexLock.
+     */
+    FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+    FlexLockCounter[0] = (int) NumFixedFlexLocks;
+    FlexLockCounter[1] = numLocks;
+}
+
+/*
+ * FlexLockAssign - assign a dynamically-allocated FlexLock number
+ *
+ * We interlock this using the same spinlock that is used to protect
+ * ShmemAlloc().  Interlocking is not really necessary during postmaster
+ * startup, but it is needed if any user-defined code tries to allocate
+ * LWLocks after startup.
+ */
+FlexLockId
+FlexLockAssign(char locktype)
+{
+    FlexLockId  result;
+
+    /* use volatile pointer to prevent code rearrangement */
+    volatile int *FlexLockCounter;
+
+    FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+    SpinLockAcquire(ShmemLock);
+    if (FlexLockCounter[0] >= FlexLockCounter[1])
+    {
+        SpinLockRelease(ShmemLock);
+        elog(ERROR, "no more FlexLockIds available");
+    }
+    result = (FlexLockId) (FlexLockCounter[0]++);
+    SpinLockRelease(ShmemLock);
+
+    FlexLockInit(&FlexLockArray[result].flex, locktype);
+
+    return result;
+}
+
+/*
+ * Initialize a FlexLock.
+ */
+static void
+FlexLockInit(FlexLock *flex, char locktype)
+{
+    SpinLockInit(&flex->mutex);
+    flex->releaseOK = true;
+    flex->locktype = locktype;
+
+    /*
+     * We might need to think a little harder about what should happen here
+     * if some future type of FlexLock requires more initialization than
+     * this.  For now, this will suffice.
+     */
+}
+
+/*
+ * Add lock to list of locks held by this backend, so that it can be
+ * released during error recovery.
+ */
+void
+FlexLockRemember(FlexLockId id)
+{
+    if (num_held_flexlocks >= MAX_SIMUL_FLEXLOCKS)
+        elog(PANIC, "too many FlexLocks taken");
+    held_flexlocks[num_held_flexlocks++] = id;
+}
+
+/*
+ * Remove lock from list of locks held.  Usually, but not always, it will
+ * be the latest-acquired lock; so search array backwards.
+ */
+void
+FlexLockForget(FlexLockId id)
+{
+    int         i;
+
+    for (i = num_held_flexlocks; --i >= 0;)
+    {
+        if (id == held_flexlocks[i])
+            break;
+    }
+    if (i < 0)
+        elog(ERROR, "lock %d is not held", (int) id);
+    num_held_flexlocks--;
+    for (; i < num_held_flexlocks; i++)
+        held_flexlocks[i] = held_flexlocks[i + 1];
+}
+
+/*
+ * FlexLockWait - wait until awakened
+ *
+ * Since we share the process wait semaphore with the regular lock manager
+ * and ProcWaitForSignal, and we may need to acquire a FlexLock while one of
+ * those is pending, it is possible that we get awakened for a reason other
+ * than being signaled by a FlexLock release.  If so, loop back and wait
+ * again.
+ *
+ * Returns the number of "extra" waits absorbed so that, once we've gotten
+ * the FlexLock, we can re-increment the sema by the number of additional
+ * signals received, so that the lock manager or signal manager will see the
+ * received signal when it next waits.
+ */
+int
+FlexLockWait(FlexLockId id, int mode)
+{
+    int         extraWaits = 0;
+
+    FlexLockDebug("LWLockAcquire", id, "waiting");
+    TRACE_POSTGRESQL_FLEXLOCK_WAIT_START(id, mode);
+
+    for (;;)
+    {
+        /* "false" means cannot accept cancel/die interrupt here. */
+        PGSemaphoreLock(&MyProc->sem, false);
+
+        /*
+         * FLEXTODO: I think we should return this, instead of ignoring it.
+         * Any non-zero value means "wake up".
+         */
+        if (MyProc->flWaitResult)
+            break;
+        extraWaits++;
+    }
+
+    TRACE_POSTGRESQL_FLEXLOCK_WAIT_DONE(id, mode);
+    FlexLockDebug("LWLockAcquire", id, "awakened");
+
+    return extraWaits;
+}
+
+/*
+ * FlexLockReleaseAll - release all currently-held locks
+ *
+ * Used to clean up after ereport(ERROR).  An important difference between
+ * this function and retail LWLockRelease calls is that InterruptHoldoffCount
+ * is unchanged by this operation.  This is necessary since
+ * InterruptHoldoffCount has been set to an appropriate level earlier in
+ * error recovery.  We could decrement it below zero if we allow it to drop
+ * for each released lock!
+ */
+void
+FlexLockReleaseAll(void)
+{
+    while (num_held_flexlocks > 0)
+    {
+        FlexLockId  id;
+        FlexLock   *flex;
+
+        HOLD_INTERRUPTS();      /* match the upcoming RESUME_INTERRUPTS */
+
+        id = held_flexlocks[num_held_flexlocks - 1];
+        flex = &FlexLockArray[id].flex;
+        if (flex->locktype == FLEXLOCK_TYPE_LWLOCK)
+            LWLockRelease(id);
+        else
+        {
+            Assert(id == ProcArrayLock);
+            ProcArrayLockRelease();
+        }
+    }
+}
+
+/*
+ * FlexLockHeldByMe - test whether my process currently holds a lock
+ *
+ * This is meant as debug support only.  We do not consider the lock mode.
+ */
+bool
+FlexLockHeldByMe(FlexLockId id)
+{
+    int         i;
+
+    for (i = 0; i < num_held_flexlocks; i++)
+    {
+        if (held_flexlocks[i] == id)
+            return true;
+    }
+    return false;
+}
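[Editor's note: the comments above RequestAddinFlexLocks() and FlexLockAssign() describe the addin allocation protocol. As a concrete illustration, a hypothetical module loaded via shared_preload_libraries might use them as sketched below. This is not part of the commit: the names my_lock and my_shmem_startup are invented, and a plain-LWLock consumer would presumably keep going through LWLockAssign(), which the wider patch series retains as a wrapper around FlexLockAssign().]

/*
 * Sketch only -- not part of this commit.  "my_lock" and "my_shmem_startup"
 * are hypothetical names used for illustration.
 */
#include "postgres.h"
#include "storage/flexlock_internals.h"

static FlexLockId my_lock;

void
_PG_init(void)
{
    /*
     * Reserve one slot while the postmaster is still sizing shared memory;
     * once shared memory exists, further requests are silently ignored
     * (see RequestAddinFlexLocks above).
     */
    RequestAddinFlexLocks(1);
}

static void
my_shmem_startup(void)
{
    /* Claim one of the reserved slots; addin locks are ordinary LWLocks. */
    my_lock = FlexLockAssign(FLEXLOCK_TYPE_LWLOCK);
}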
diff --git a/src/backend/storage/lmgr/procarraylock.c b/src/backend/storage/lmgr/procarraylock.c
new file mode 100644
index 0000000000..6838ed650f
--- /dev/null
+++ b/src/backend/storage/lmgr/procarraylock.c
@@ -0,0 +1,341 @@
+/*-------------------------------------------------------------------------
+ *
+ * procarraylock.c
+ *    Lock management for the ProcArray
+ *
+ * Because the ProcArray data structure is highly trafficked, it is
+ * critical that mutual exclusion for ProcArray operations be as efficient
+ * as possible.  A particular problem is transaction end (commit or abort),
+ * which cannot be done in parallel with snapshot acquisition.  We
+ * therefore include some special hacks to deal with this case efficiently.
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *    src/backend/storage/lmgr/procarraylock.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "pg_trace.h"
+#include "access/transam.h"
+#include "storage/flexlock_internals.h"
+#include "storage/ipc.h"
+#include "storage/procarraylock.h"
+#include "storage/proc.h"
+#include "storage/spin.h"
+
+typedef struct ProcArrayLockStruct
+{
+    FlexLock    flex;           /* common FlexLock infrastructure */
+    char        exclusive;      /* # of exclusive holders (0 or 1) */
+    int         shared;         /* # of shared holders (0..MaxBackends) */
+    PGPROC     *ending;         /* transactions wishing to clear state */
+    TransactionId latest_ending_xid;    /* latest ending XID */
+} ProcArrayLockStruct;
+
+/* There is only one ProcArrayLock. */
+#define ProcArrayLockPointer() \
+    (AssertMacro(FlexLockArray[ProcArrayLock].flex.locktype == \
+        FLEXLOCK_TYPE_PROCARRAYLOCK), \
+     (volatile ProcArrayLockStruct *) &FlexLockArray[ProcArrayLock])
+
+/*
+ * ProcArrayLockAcquire - acquire the ProcArrayLock in the specified mode
+ *
+ * If the lock is not available, sleep until it is.
+ *
+ * Side effect: cancel/die interrupts are held off until lock release.
+ */
+void
+ProcArrayLockAcquire(ProcArrayLockMode mode)
+{
+    volatile ProcArrayLockStruct *lock = ProcArrayLockPointer();
+    PGPROC     *proc = MyProc;
+    bool        retry = false;
+    int         extraWaits = 0;
+
+    /*
+     * We can't wait if we haven't got a PGPROC.  This should only occur
+     * during bootstrap or shared memory initialization.  Put an Assert here
+     * to catch unsafe coding practices.
+     */
+    Assert(!(proc == NULL && IsUnderPostmaster));
+
+    /*
+     * Lock out cancel/die interrupts until we exit the code section
+     * protected by the ProcArrayLock.  This ensures that interrupts will
+     * not interfere with manipulations of data structures in shared memory.
+     */
+    HOLD_INTERRUPTS();
+
+    /*
+     * Loop here to try to acquire lock after each time we are signaled by
+     * ProcArrayLockRelease.  See comments in LWLockAcquire for an
+     * explanation of why we do not attempt to hand off the lock directly.
+     */
+    for (;;)
+    {
+        bool        mustwait;
+
+        /* Acquire mutex.  Time spent holding mutex should be short! */
+        SpinLockAcquire(&lock->flex.mutex);
+
+        /* If retrying, allow LWLockRelease to release waiters again */
+        if (retry)
+            lock->flex.releaseOK = true;
+
+        /* If I can get the lock, do so quickly. */
+        if (mode == PAL_EXCLUSIVE)
+        {
+            if (lock->exclusive == 0 && lock->shared == 0)
+            {
+                lock->exclusive++;
+                mustwait = false;
+            }
+            else
+                mustwait = true;
+        }
+        else
+        {
+            if (lock->exclusive == 0)
+            {
+                lock->shared++;
+                mustwait = false;
+            }
+            else
+                mustwait = true;
+        }
+
+        if (!mustwait)
+            break;              /* got the lock */
+
+        /* Add myself to wait queue. */
+        FlexLockJoinWaitQueue(lock, (int) mode);
+
+        /* Can release the mutex now */
+        SpinLockRelease(&lock->flex.mutex);
+
+        /* Wait until awakened. */
+        extraWaits += FlexLockWait(ProcArrayLock, mode);
+
+        /* Now loop back and try to acquire lock again. */
+        retry = true;
+    }
+
+    /* We are done updating shared state of the lock itself. */
+    SpinLockRelease(&lock->flex.mutex);
+
+    TRACE_POSTGRESQL_FLEXLOCK_ACQUIRE(ProcArrayLock, mode);
+
+    /* Add lock to list of locks held by this backend */
+    FlexLockRemember(ProcArrayLock);
+
+    /*
+     * Fix the process wait semaphore's count for any absorbed wakeups.
+     */
+    while (extraWaits-- > 0)
+        PGSemaphoreUnlock(&proc->sem);
+}
+
+/*
+ * ProcArrayLockClearTransaction - safely clear transaction details
+ *
+ * This can't be done while ProcArrayLock is held, but it's so fast that
+ * we can afford to do it while holding the spinlock, rather than acquiring
+ * and releasing the lock.
+ */
+void
+ProcArrayLockClearTransaction(TransactionId latestXid)
+{
+    volatile ProcArrayLockStruct *lock = ProcArrayLockPointer();
+    PGPROC     *proc = MyProc;
+    int         extraWaits = 0;
+    bool        mustwait;
+
+    HOLD_INTERRUPTS();
+
+    /* Acquire mutex.  Time spent holding mutex should be short! */
+    SpinLockAcquire(&lock->flex.mutex);
+
+    if (lock->exclusive == 0 && lock->shared == 0)
+    {
+        {
+            volatile PGPROC *vproc = proc;
+
+            /* If there are no lockers, clear the critical PGPROC fields. */
+            vproc->xid = InvalidTransactionId;
+            vproc->xmin = InvalidTransactionId;
+            /* must be cleared with xid/xmin: */
+            vproc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
+            vproc->subxids.nxids = 0;
+            vproc->subxids.overflowed = false;
+        }
+        mustwait = false;
+
+        /* Also advance global latestCompletedXid while holding the lock */
+        if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
+                                  latestXid))
+            ShmemVariableCache->latestCompletedXid = latestXid;
+    }
+    else
+    {
+        /* Rats, must wait. */
+        proc->flWaitLink = lock->ending;
+        lock->ending = proc;
+        if (!TransactionIdIsValid(lock->latest_ending_xid) ||
+            TransactionIdPrecedes(lock->latest_ending_xid, latestXid))
+            lock->latest_ending_xid = latestXid;
+        mustwait = true;
+    }
+
+    /* Can release the mutex now */
+    SpinLockRelease(&lock->flex.mutex);
+
+    /*
+     * If we were not able to perform the operation immediately, we must
+     * wait.  But we need not retry after being awoken, because the last
+     * lock holder to release the lock will do the work first, on our
+     * behalf.
+     */
+    if (mustwait)
+    {
+        extraWaits += FlexLockWait(ProcArrayLock, 2);
+        while (extraWaits-- > 0)
+            PGSemaphoreUnlock(&proc->sem);
+    }
+
+    RESUME_INTERRUPTS();
+}
+
+/*
+ * ProcArrayLockRelease - release a previously acquired lock
+ */
+void
+ProcArrayLockRelease(void)
+{
+    volatile ProcArrayLockStruct *lock = ProcArrayLockPointer();
+    PGPROC     *head;
+    PGPROC     *ending = NULL;
+    PGPROC     *proc;
+
+    FlexLockForget(ProcArrayLock);
+
+    /* Acquire mutex.  Time spent holding mutex should be short! */
+    SpinLockAcquire(&lock->flex.mutex);
+
+    /* Release my hold on lock */
+    if (lock->exclusive > 0)
+        lock->exclusive--;
+    else
+    {
+        Assert(lock->shared > 0);
+        lock->shared--;
+    }
+
+    /*
+     * If the lock is now free, but there are some transactions trying to
+     * end, we must clear the critical PGPROC fields for them, and save a
+     * list of them so we can wake them up.
+     */
+    if (lock->exclusive == 0 && lock->shared == 0 && lock->ending != NULL)
+    {
+        volatile PGPROC *vproc;
+
+        ending = lock->ending;
+        vproc = ending;
+
+        while (vproc != NULL)
+        {
+            vproc->xid = InvalidTransactionId;
+            vproc->xmin = InvalidTransactionId;
+            /* must be cleared with xid/xmin: */
+            vproc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
+            vproc->subxids.nxids = 0;
+            vproc->subxids.overflowed = false;
+            vproc = vproc->flWaitLink;
+        }
+
+        /* Also advance global latestCompletedXid */
+        if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
+                                  lock->latest_ending_xid))
+            ShmemVariableCache->latestCompletedXid = lock->latest_ending_xid;
+
+        /* Reset lock state. */
+        lock->ending = NULL;
+        lock->latest_ending_xid = InvalidTransactionId;
+    }
+
+    /*
+     * See if I need to awaken any waiters.  If I released a non-last shared
+     * hold, there cannot be anything to do.  Also, do not awaken any waiters
+     * if someone has already awakened waiters that haven't yet acquired the
+     * lock.
+     */
+    head = lock->flex.head;
+    if (head != NULL)
+    {
+        if (lock->exclusive == 0 && lock->shared == 0 && lock->flex.releaseOK)
+        {
+            /*
+             * Remove the to-be-awakened PGPROCs from the queue.  If the
+             * front waiter wants exclusive lock, awaken him only.  Otherwise
+             * awaken as many waiters as want shared access.
+             */
+            proc = head;
+            if (proc->flWaitMode != PAL_EXCLUSIVE)
+            {
+                while (proc->flWaitLink != NULL &&
+                       proc->flWaitLink->flWaitMode != PAL_EXCLUSIVE)
+                    proc = proc->flWaitLink;
+            }
+            /* proc is now the last PGPROC to be released */
+            lock->flex.head = proc->flWaitLink;
+            proc->flWaitLink = NULL;
+            /* prevent additional wakeups until retryer gets to run */
+            lock->flex.releaseOK = false;
+        }
+        else
+        {
+            /* lock is still held, can't awaken anything */
+            head = NULL;
+        }
+    }
+
+    /* We are done updating shared state of the lock itself. */
+    SpinLockRelease(&lock->flex.mutex);
+
+    TRACE_POSTGRESQL_FLEXLOCK_RELEASE(ProcArrayLock);
+
+    /*
+     * Awaken any waiters I removed from the queue.
+     */
+    while (head != NULL)
+    {
+        FlexLockDebug("LWLockRelease", ProcArrayLock, "release waiter");
+        proc = head;
+        head = proc->flWaitLink;
+        proc->flWaitLink = NULL;
+        proc->flWaitResult = 1; /* any non-zero value will do */
+        PGSemaphoreUnlock(&proc->sem);
+    }
+
+    /*
+     * Also awaken any processes whose critical PGPROC fields I cleared
+     */
+    while (ending != NULL)
+    {
+        FlexLockDebug("LWLockRelease", ProcArrayLock, "release ending");
+        proc = ending;
+        ending = proc->flWaitLink;
+        proc->flWaitLink = NULL;
+        proc->flWaitResult = 1; /* any non-zero value will do */
+        PGSemaphoreUnlock(&proc->sem);
+    }
+
+    /*
+     * Now okay to allow cancel/die interrupts.
+     */
+    RESUME_INTERRUPTS();
+}
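[Editor's note: the fast path above is the point of the new lock type: a transaction ending while the lock is busy queues itself on lock->ending and lets the last holder clear its PGPROC fields, instead of waiting for a full exclusive acquisition. A sketch of the intended call pattern follows; the caller names are invented, and the real call sites would presumably be GetSnapshotData() and ProcArrayEndTransaction() in procarray.c, which this diff does not touch.]

/* Sketch only -- invented caller names; not part of this commit. */
#include "postgres.h"
#include "storage/procarraylock.h"

static void
build_snapshot_sketch(void)
{
    /* Snapshot acquisition takes the lock in shared mode. */
    ProcArrayLockAcquire(PAL_SHARED);
    /* ... scan the ProcArray and collect running XIDs here ... */
    ProcArrayLockRelease();
}

static void
commit_transaction_sketch(TransactionId latestXid)
{
    /*
     * Clears MyProc->xid/xmin and advances latestCompletedXid.  This blocks
     * only if the lock is currently held, and even then another backend
     * performs the clearing on our behalf when it releases the lock.
     */
    ProcArrayLockClearTransaction(latestXid);
}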
diff --git a/src/include/storage/flexlock.h b/src/include/storage/flexlock.h
new file mode 100644
index 0000000000..612c21a5a2
--- /dev/null
+++ b/src/include/storage/flexlock.h
@@ -0,0 +1,102 @@
+/*-------------------------------------------------------------------------
+ *
+ * flexlock.h
+ *    Flex lock manager
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/flexlock.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef FLEXLOCK_H
+#define FLEXLOCK_H
+
+/*
+ * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
+ * here, but we need them to set up enum FlexLockId correctly, and having
+ * this file include lock.h or bufmgr.h would be backwards.
+ */
+
+/* Number of partitions of the shared buffer mapping hashtable */
+#define NUM_BUFFER_PARTITIONS  16
+
+/* Number of partitions the shared lock tables are divided into */
+#define LOG2_NUM_LOCK_PARTITIONS  4
+#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)
+
+/* Number of partitions the shared predicate lock tables are divided into */
+#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
+#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
+
+/*
+ * We have a number of predefined FlexLocks, plus a bunch of locks that are
+ * dynamically assigned (e.g., for shared buffers).  The FlexLock structures
+ * live in shared memory (since they contain shared data) and are identified
+ * by values of this enumerated type.  We abuse the notion of an enum somewhat
+ * by allowing values not listed in the enum declaration to be assigned.
+ * The extra value MaxDynamicFlexLock is there to keep the compiler from
+ * deciding that the enum can be represented as char or short ...
+ *
+ * If you remove a lock, please replace it with a placeholder.  This retains
+ * the lock numbering, which is helpful for DTrace and other external
+ * debugging scripts.
+ */
+typedef enum FlexLockId
+{
+    BufFreelistLock,
+    ShmemIndexLock,
+    OidGenLock,
+    XidGenLock,
+    ProcArrayLock,
+    SInvalReadLock,
+    SInvalWriteLock,
+    WALInsertLock,
+    WALWriteLock,
+    ControlFileLock,
+    CheckpointLock,
+    CLogControlLock,
+    SubtransControlLock,
+    MultiXactGenLock,
+    MultiXactOffsetControlLock,
+    MultiXactMemberControlLock,
+    RelCacheInitLock,
+    BgWriterCommLock,
+    TwoPhaseStateLock,
+    TablespaceCreateLock,
+    BtreeVacuumLock,
+    AddinShmemInitLock,
+    AutovacuumLock,
+    AutovacuumScheduleLock,
+    SyncScanLock,
+    RelationMappingLock,
+    AsyncCtlLock,
+    AsyncQueueLock,
+    SerializableXactHashLock,
+    SerializableFinishedListLock,
+    SerializablePredicateLockListLock,
+    OldSerXidLock,
+    SyncRepLock,
+    /* Individual lock IDs end here */
+    FirstBufMappingLock,
+    FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
+    FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,
+
+    /* must be last except for MaxDynamicFlexLock: */
+    NumFixedFlexLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,
+
+    MaxDynamicFlexLock = 1000000000
+} FlexLockId;
+
+/* Shared memory setup. */
+extern int  NumFlexLocks(void);
+extern Size FlexLockShmemSize(void);
+extern void RequestAddinFlexLocks(int n);
+extern void CreateFlexLocks(void);
+
+/* Error recovery and debugging support functions. */
+extern void FlexLockReleaseAll(void);
+extern bool FlexLockHeldByMe(FlexLockId id);
+
+#endif   /* FLEXLOCK_H */
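[Editor's note: the header's remark about "abusing" the enum can be made concrete: every FlexLockId, fixed or dynamically assigned, is simply an index into the shared FlexLockArray, so a value like NumFixedFlexLocks + 3 is a legal FlexLockId even though no enumerator names it. A hypothetical debug helper, assuming flexlock_internals.h (defined next in this diff):]

/* Sketch only -- a hypothetical debug helper, not part of this commit. */
#include "postgres.h"
#include "storage/flexlock_internals.h"

static char
flexlock_type_sketch(FlexLockId id)
{
    /*
     * Fixed ids (below NumFixedFlexLocks) and dynamically assigned ids
     * (NumFixedFlexLocks and up) are both plain indexes into the shared
     * array; the common header records each lock's type byte.
     */
    return FlexLockArray[id].flex.locktype;
}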
diff --git a/src/include/storage/flexlock_internals.h b/src/include/storage/flexlock_internals.h
new file mode 100644
index 0000000000..d1bca45212
--- /dev/null
+++ b/src/include/storage/flexlock_internals.h
@@ -0,0 +1,89 @@
+/*-------------------------------------------------------------------------
+ *
+ * flexlock_internals.h
+ *    Flex lock internals.  Only files which implement a FlexLock
+ *    type should need to include this.  Merging this with flexlock.h
+ *    creates a circular header dependency, but even if it didn't, this
+ *    is cleaner.
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/flexlock_internals.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef FLEXLOCK_INTERNALS_H
+#define FLEXLOCK_INTERNALS_H
+
+#include "pg_trace.h"
+#include "storage/flexlock.h"
+#include "storage/proc.h"
+#include "storage/s_lock.h"
+
+/*
+ * Each individual FlexLock implementation gets this many bytes to store
+ * its state; of course, a given implementation could also allocate
+ * additional shmem elsewhere, but we provide this many bytes within the
+ * array.  The header fields common to all FlexLock types are included in
+ * this number.  A power of two should probably be chosen, to avoid
+ * alignment issues and cache line splitting.  It might be useful to
+ * increase this on systems where a cache line is more than 64 bytes in
+ * size.
+ */
+#define FLEX_LOCK_BYTES  64
+
+typedef struct FlexLock
+{
+    char        locktype;       /* see FLEXLOCK_TYPE_* constants */
+    slock_t     mutex;          /* Protects FlexLock state and wait queues */
+    bool        releaseOK;      /* T if ok to release waiters */
+    PGPROC     *head;           /* head of list of waiting PGPROCs */
+    PGPROC     *tail;           /* tail of list of waiting PGPROCs */
+    /* tail is undefined when head is NULL */
+} FlexLock;
+
+#define FLEXLOCK_TYPE_LWLOCK            'l'
+#define FLEXLOCK_TYPE_PROCARRAYLOCK     'p'
+
+typedef union FlexLockPadded
+{
+    FlexLock    flex;
+    char        pad[FLEX_LOCK_BYTES];
+} FlexLockPadded;
+
+extern FlexLockPadded *FlexLockArray;
+
+extern FlexLockId FlexLockAssign(char locktype);
+extern void FlexLockRemember(FlexLockId id);
+extern void FlexLockForget(FlexLockId id);
+extern int  FlexLockWait(FlexLockId id, int mode);
+
+/*
+ * We must join the wait queue while holding the spinlock, so we define this
+ * as a macro, for speed.
+ */
+#define FlexLockJoinWaitQueue(lock, mode) \
+    do { \
+        Assert(MyProc != NULL); \
+        MyProc->flWaitResult = 0; \
+        MyProc->flWaitMode = mode; \
+        MyProc->flWaitLink = NULL; \
+        if (lock->flex.head == NULL) \
+            lock->flex.head = MyProc; \
+        else \
+            lock->flex.tail->flWaitLink = MyProc; \
+        lock->flex.tail = MyProc; \
+    } while (0)
+
+#ifdef LOCK_DEBUG
+extern bool Trace_flexlocks;
+#define FlexLockDebug(where, id, msg) \
+    do { \
+        if (Trace_flexlocks) \
+            elog(LOG, "%s(%d): %s", where, (int) id, msg); \
+    } while (0)
+#else
+#define FlexLockDebug(where, id, msg)
+#endif
+
+#endif   /* FLEXLOCK_INTERNALS_H */
diff --git a/src/include/storage/procarraylock.h b/src/include/storage/procarraylock.h
new file mode 100644
index 0000000000..678ca6ffe9
--- /dev/null
+++ b/src/include/storage/procarraylock.h
@@ -0,0 +1,28 @@
+/*-------------------------------------------------------------------------
+ *
+ * procarraylock.h
+ *    Lock management for the ProcArray
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/procarraylock.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PROCARRAYLOCK_H
+#define PROCARRAYLOCK_H
+
+#include "storage/flexlock.h"
+
+typedef enum ProcArrayLockMode
+{
+    PAL_EXCLUSIVE,
+    PAL_SHARED
+} ProcArrayLockMode;
+
+extern void ProcArrayLockAcquire(ProcArrayLockMode mode);
+extern void ProcArrayLockClearTransaction(TransactionId latestXid);
+extern void ProcArrayLockRelease(void);
+
+#endif   /* PROCARRAYLOCK_H */
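[Editor's note: flexlock_internals.h is the extension point the patch advertises: a new lock type needs a type byte, a struct that embeds FlexLock as its first member while staying within FLEX_LOCK_BYTES, and an acquire/release path built from the shared primitives. A rough sketch of what a hypothetical third lock type would declare follows; the names and the type byte 'x' are invented, and procarraylock.c above is the real model to follow.]

/* Sketch only -- a hypothetical additional FlexLock type, not this commit. */
#include "postgres.h"
#include "storage/flexlock_internals.h"

#define FLEXLOCK_TYPE_MYLOCK  'x'   /* invented type byte */

typedef struct MyLockStruct
{
    FlexLock    flex;           /* common header must come first */
    int         special_state;  /* type-specific state; the whole struct
                                 * must fit in FLEX_LOCK_BYTES */
} MyLockStruct;

static FlexLockId my_lock_id;

static void
my_lock_create(void)
{
    /* Allocate a slot in the shared array and stamp it with our type. */
    my_lock_id = FlexLockAssign(FLEXLOCK_TYPE_MYLOCK);
}

[Note that FlexLockReleaseAll() in flexlock.c dispatches on locktype when cleaning up after an error, so a real third type would also have to be added to that function's release branch.]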