author     Kevin Grittner    2011-11-15 22:03:06 +0000
committer  Kevin Grittner    2011-11-15 22:03:06 +0000
commit     02721bfa39f96284086743a3030c7fd7e6f007a6 (patch)
tree       ea8d9f9259c0933c362713238a7eb925c3db23f3
parent     df02fedd3f7153b51771e36d63e1825db7083130 (diff)
Apply v1 patches from Robert Haas. (flexlock)
30 files changed, 276 insertions(+), 721 deletions(-)
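For orientation before the hunks themselves: the patch generalizes the LWLock machinery into a common "FlexLock" layer, with LWLocks becoming one FlexLock type and a new specialized ProcArrayLock becoming another. The sketch below shows the shape of that layering. The LWLock struct is taken verbatim from the lwlock.c hunk further down; the FlexLock struct itself lives in storage/flexlock_internals.h, which this diff does not include, so its field list here is only an assumption reconstructed from call sites in the patch (lock->flex.mutex, lock->flex.releaseOK, lock->flex.head, FlexLockArray[lockid].flex.locktype).

/*
 * Sketch only -- not part of the patch.  Fields of FlexLock are inferred;
 * the real definition is in storage/flexlock_internals.h (not shown).
 */
typedef struct FlexLock
{
    char        locktype;       /* FLEXLOCK_TYPE_* constant, e.g. LWLOCK */
    slock_t     mutex;          /* protects the fields below */
    bool        releaseOK;      /* T if ok to release waiters */
    PGPROC     *head;           /* head of list of waiting PGPROCs */
    PGPROC     *tail;           /* tail of list of waiting PGPROCs */
} FlexLock;

typedef struct LWLock
{
    FlexLock    flex;           /* common FlexLock infrastructure */
    char        exclusive;      /* # of exclusive holders (0 or 1) */
    int         shared;         /* # of shared holders (0..MaxBackends) */
} LWLock;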
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 8dc3054e37..51b24d0b3e 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -105,7 +105,7 @@ typedef struct pgssEntry
  */
 typedef struct pgssSharedState
 {
-    LWLockId    lock;           /* protects hashtable search/modification */
+    FlexLockId  lock;           /* protects hashtable search/modification */
     int         query_size;     /* max query length in bytes */
 } pgssSharedState;
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index d1e628fefc..8517b36331 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -6199,14 +6199,14 @@ LOG:  CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
     </varlistentry>

     <varlistentry>
-     <term><varname>trace_lwlocks</varname> (<type>boolean</type>)</term>
+     <term><varname>trace_flexlocks</varname> (<type>boolean</type>)</term>
      <indexterm>
-      <primary><varname>trace_lwlocks</> configuration parameter</primary>
+      <primary><varname>trace_flexlocks</> configuration parameter</primary>
      </indexterm>
      <listitem>
       <para>
-       If on, emit information about lightweight lock usage.  Lightweight
-       locks are intended primarily to provide mutual exclusion of access
+       If on, emit information about FlexLock usage.  FlexLocks
+       are intended primarily to provide mutual exclusion of access
        to shared-memory data structures.
       </para>
       <para>
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index b9dc1d2001..98ed0d37ec 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -1724,49 +1724,49 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS procpid,
       or kilobytes of memory used for an internal sort.</entry>
     </row>
     <row>
-     <entry>lwlock-acquire</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock has been acquired.
-      arg0 is the LWLock's ID.
-      arg1 is the requested lock mode, either exclusive or shared.</entry>
+     <entry>flexlock-acquire</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock has been acquired.
+      arg0 is the FlexLock's ID.
+      arg1 is the requested lock mode.</entry>
     </row>
     <row>
-     <entry>lwlock-release</entry>
-     <entry>(LWLockId)</entry>
-     <entry>Probe that fires when an LWLock has been released (but note
+     <entry>flexlock-release</entry>
+     <entry>(FlexLockId)</entry>
+     <entry>Probe that fires when a FlexLock has been released (but note
       that any released waiters have not yet been awakened).
-      arg0 is the LWLock's ID.</entry>
+      arg0 is the FlexLock's ID.</entry>
     </row>
     <row>
-     <entry>lwlock-wait-start</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock was not immediately available and
+     <entry>flexlock-wait-start</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock was not immediately available and
       a server process has begun to wait for the lock to become available.
-      arg0 is the LWLock's ID.
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
-     <entry>lwlock-wait-done</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
+     <entry>flexlock-wait-done</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
      <entry>Probe that fires when a server process has been released from its
-      wait for an LWLock (it does not actually have the lock yet).
-      arg0 is the LWLock's ID.
+      wait for a FlexLock (it does not actually have the lock yet).
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
-     <entry>lwlock-condacquire</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock was successfully acquired when the
-      caller specified no waiting.
-      arg0 is the LWLock's ID.
+     <entry>flexlock-condacquire</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock was successfully acquired when
+      the caller specified no waiting.
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
-     <entry>lwlock-condacquire-fail</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock was not successfully acquired when
-      the caller specified no waiting.
-      arg0 is the LWLock's ID.
+     <entry>flexlock-condacquire-fail</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock was not successfully acquired
+      when the caller specified no waiting.
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
@@ -1813,11 +1813,11 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS procpid,
      <entry>unsigned int</entry>
     </row>
     <row>
-     <entry>LWLockId</entry>
+     <entry>FlexLockId</entry>
      <entry>int</entry>
     </row>
     <row>
-     <entry>LWLockMode</entry>
+     <entry>FlexLockMode</entry>
      <entry>int</entry>
     </row>
     <row>
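As the pg_stat_statements hunk above shows, the only change visible to extension code is the spelling of the lock-id type; acquisition and release keep their existing names and signatures. A minimal, hypothetical usage sketch (not from the patch) of the renamed type with LWLockAssign(), which the lwlock.c hunk below keeps and which now returns a FlexLockId:

#include "postgres.h"
#include "storage/lwlock.h"

/* Hypothetical shared state for an extension; mirrors pgssSharedState. */
typedef struct SharedCounterState
{
    FlexLockId  lock;           /* spelled LWLockId before this patch */
    int64       counter;
} SharedCounterState;

static void
shared_counter_init(SharedCounterState *state)
{
    state->lock = LWLockAssign();   /* dynamically allocate a lock */
    state->counter = 0;
}

static void
shared_counter_bump(SharedCounterState *state)
{
    LWLockAcquire(state->lock, LW_EXCLUSIVE);
    state->counter++;
    LWLockRelease(state->lock);
}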
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index f7caa341e1..09d58626ba 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -151,7 +151,7 @@ SimpleLruShmemSize(int nslots, int nlsns)
     sz += MAXALIGN(nslots * sizeof(bool));      /* page_dirty[] */
     sz += MAXALIGN(nslots * sizeof(int));       /* page_number[] */
     sz += MAXALIGN(nslots * sizeof(int));       /* page_lru_count[] */
-    sz += MAXALIGN(nslots * sizeof(LWLockId));  /* buffer_locks[] */
+    sz += MAXALIGN(nslots * sizeof(FlexLockId)); /* buffer_locks[] */

     if (nlsns > 0)
         sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));    /* group_lsn[] */
@@ -161,7 +161,7 @@ SimpleLruShmemSize(int nslots, int nlsns)

 void
 SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
-              LWLockId ctllock, const char *subdir)
+              FlexLockId ctllock, const char *subdir)
 {
     SlruShared  shared;
     bool        found;
@@ -202,8 +202,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
         offset += MAXALIGN(nslots * sizeof(int));
         shared->page_lru_count = (int *) (ptr + offset);
         offset += MAXALIGN(nslots * sizeof(int));
-        shared->buffer_locks = (LWLockId *) (ptr + offset);
-        offset += MAXALIGN(nslots * sizeof(LWLockId));
+        shared->buffer_locks = (FlexLockId *) (ptr + offset);
+        offset += MAXALIGN(nslots * sizeof(FlexLockId));

         if (nlsns > 0)
         {
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 477982d5fa..d5d1ee9dc7 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -318,9 +318,9 @@ MarkAsPreparing(TransactionId xid, const char *gid,
     gxact->proc.roleId = owner;
     gxact->proc.inCommit = false;
     gxact->proc.vacuumFlags = 0;
-    gxact->proc.lwWaiting = false;
-    gxact->proc.lwExclusive = false;
-    gxact->proc.lwWaitLink = NULL;
+    gxact->proc.flWaitResult = 0;
+    gxact->proc.flWaitMode = 0;
+    gxact->proc.flWaitLink = NULL;
     gxact->proc.waitLock = NULL;
     gxact->proc.waitProcLock = NULL;
     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
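The PGPROC fields initialized above replace the LWLock-specific lwWaiting/lwExclusive pair with type-neutral ones: flWaitMode records the mode being waited for, and flWaitResult doubles as the wakeup flag (zero while waiting, set non-zero by the releaser just before it unlocks the semaphore, as the lwlock.c hunks further down show). A sketch of the wait loop this protocol implies, reconstructed from the loop the patch deletes from LWLockAcquire(); the patch factors it into FlexLockWait(), whose real body is not in this excerpt:

/*
 * Hypothetical body of FlexLockWait(), inferred from the removed code.
 * Returns the number of semaphore wakeups absorbed on behalf of other
 * facilities, which the caller must re-add to the semaphore.
 */
static int
FlexLockWaitSketch(FlexLockId lockid, int mode)
{
    PGPROC     *proc = MyProc;
    int         extraWaits = 0;

    for (;;)
    {
        /* "false" means cannot accept cancel/die interrupt here. */
        PGSemaphoreLock(&proc->sem, false);
        if (proc->flWaitResult)
            break;              /* a releaser woke us for this lock */
        extraWaits++;           /* wakeup was meant for someone else */
    }
    return extraWaits;
}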
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index c151d3be19..19b708c8cf 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -2248,7 +2248,7 @@ AbortTransaction(void)
      * Releasing LW locks is critical since we might try to grab them again
      * while cleaning up!
      */
-    LWLockReleaseAll();
+    FlexLockReleaseAll();

     /* Clean up buffer I/O and buffer context locks, too */
     AbortBufferIO();
@@ -4138,7 +4138,7 @@ AbortSubTransaction(void)
      * FIXME This may be incorrect --- Are there some locks we should keep?
      * Buffer locks, for example?  I don't think so but I'm not sure.
      */
-    LWLockReleaseAll();
+    FlexLockReleaseAll();

     AbortBufferIO();
     UnlockBuffers();
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 6bf2421f65..9ceee9113a 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -562,13 +562,13 @@ bootstrap_signals(void)
  * Begin shutdown of an auxiliary process.  This is approximately the equivalent
  * of ShutdownPostgres() in postinit.c.  We can't run transactions in an
  * auxiliary process, so most of the work of AbortTransaction() is not needed,
- * but we do need to make sure we've released any LWLocks we are holding.
+ * but we do need to make sure we've released any flex locks we are holding.
  * (This is only critical during an error exit.)
  */
 static void
 ShutdownAuxiliaryProcess(int code, Datum arg)
 {
-    LWLockReleaseAll();
+    FlexLockReleaseAll();
 }

 /* ----------------------------------------------------------------
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 32985a4a0a..d6bba6f3e4 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -40,6 +40,7 @@
 #include "storage/lmgr.h"
 #include "storage/proc.h"
 #include "storage/procarray.h"
+#include "storage/procarraylock.h"
 #include "utils/acl.h"
 #include "utils/attoptcache.h"
 #include "utils/datum.h"
@@ -222,9 +223,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
     /*
      * OK, let's do it.  First let other backends know I'm in ANALYZE.
      */
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);
     MyProc->vacuumFlags |= PROC_IN_ANALYZE;
-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     /*
      * Do the normal non-recursive ANALYZE.
@@ -249,9 +250,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
      * Reset my PGPROC flag.  Note: we need this here, and not in vacuum_rel,
      * because the vacuum flag is cleared by the end-of-xact code.
      */
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);
     MyProc->vacuumFlags &= ~PROC_IN_ANALYZE;
-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }

 /*
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index f42504cf9f..823dab9666 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -39,6 +39,7 @@
 #include "storage/lmgr.h"
 #include "storage/proc.h"
 #include "storage/procarray.h"
+#include "storage/procarraylock.h"
 #include "utils/acl.h"
 #include "utils/fmgroids.h"
 #include "utils/guc.h"
@@ -892,11 +893,11 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
          * MyProc->xid/xmin, else OldestXmin might appear to go backwards,
          * which is probably Not Good.
          */
-        LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+        ProcArrayLockAcquire(PAL_EXCLUSIVE);
         MyProc->vacuumFlags |= PROC_IN_VACUUM;
         if (for_wraparound)
             MyProc->vacuumFlags |= PROC_VACUUM_FOR_WRAPAROUND;
-        LWLockRelease(ProcArrayLock);
+        ProcArrayLockRelease();
     }

     /*
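analyze.c and vacuum.c now go through a dedicated ProcArrayLock API rather than calling LWLockAcquire/LWLockRelease on ProcArrayLock directly. The new header storage/procarraylock.h is not part of this excerpt; declarations consistent with the call sites in this patch would look roughly like the following (the enum ordering and exact spellings beyond the names actually used are guesses):

/* Assumed contents of storage/procarraylock.h, inferred from call sites. */
typedef enum ProcArrayLockMode
{
    PAL_SHARED,                 /* snapshot readers */
    PAL_EXCLUSIVE               /* procarray modifications */
} ProcArrayLockMode;

extern void ProcArrayLockAcquire(ProcArrayLockMode mode);
extern void ProcArrayLockRelease(void);
extern void ProcArrayLockClearTransaction(TransactionId latestXid);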
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index cacedab202..f33f573601 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -176,9 +176,10 @@ BackgroundWriterMain(void)
         /*
          * These operations are really just a minimal subset of
          * AbortTransaction().  We don't have very many resources to worry
-         * about in bgwriter, but we do have LWLocks, buffers, and temp files.
+         * about in bgwriter, but we do have flex locks, buffers, and temp
+         * files.
          */
-        LWLockReleaseAll();
+        FlexLockReleaseAll();
         AbortBufferIO();
         UnlockBuffers();
         /* buffer pins are released here: */
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index e9ae1e8ca0..2f1e8b3ada 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -281,9 +281,10 @@ CheckpointerMain(void)
         /*
          * These operations are really just a minimal subset of
          * AbortTransaction().  We don't have very many resources to worry
-         * about in checkpointer, but we do have LWLocks, buffers, and temp files.
+         * about in checkpointer, but we do have flex locks, buffers, and temp
+         * files.
          */
-        LWLockReleaseAll();
+        FlexLockReleaseAll();
         AbortBufferIO();
         UnlockBuffers();
         /* buffer pins are released here: */
@@ -1109,7 +1110,7 @@ CompactCheckpointerRequestQueue()
     bool       *skip_slot;

     /* must hold BgWriterCommLock in exclusive mode */
-    Assert(LWLockHeldByMe(BgWriterCommLock));
+    Assert(FlexLockHeldByMe(BgWriterCommLock));

     /* Initialize temporary hash table */
     MemSet(&ctl, 0, sizeof(ctl));
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 6758083bdd..14b436868d 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -109,6 +109,7 @@
 #include "postmaster/syslogger.h"
 #include "replication/walsender.h"
 #include "storage/fd.h"
+#include "storage/flexlock_internals.h"
 #include "storage/ipc.h"
 #include "storage/pg_shmem.h"
 #include "storage/pmsignal.h"
@@ -404,8 +405,6 @@ typedef struct
 typedef int InheritableSocket;
 #endif

-typedef struct LWLock LWLock;   /* ugly kluge */
-
 /*
  * Structure contains all variables passed to exec:ed backends
  */
@@ -426,7 +425,7 @@ typedef struct
     slock_t    *ShmemLock;
     VariableCache ShmemVariableCache;
     Backend    *ShmemBackendArray;
-    LWLock     *LWLockArray;
+    FlexLock   *FlexLockArray;
     slock_t    *ProcStructLock;
     PROC_HDR   *ProcGlobal;
     PGPROC     *AuxiliaryProcs;
@@ -4675,7 +4674,6 @@ MaxLivePostmasterChildren(void)
  * functions
  */
 extern slock_t *ShmemLock;
-extern LWLock *LWLockArray;
 extern slock_t *ProcStructLock;
 extern PGPROC *AuxiliaryProcs;
 extern PMSignalData *PMSignalState;
@@ -4720,7 +4718,7 @@ save_backend_variables(BackendParameters *param, Port *port,
     param->ShmemVariableCache = ShmemVariableCache;
     param->ShmemBackendArray = ShmemBackendArray;

-    param->LWLockArray = LWLockArray;
+    param->FlexLockArray = FlexLockArray;
     param->ProcStructLock = ProcStructLock;
     param->ProcGlobal = ProcGlobal;
     param->AuxiliaryProcs = AuxiliaryProcs;
@@ -4943,7 +4941,7 @@ restore_backend_variables(BackendParameters *param, Port *port)
     ShmemVariableCache = param->ShmemVariableCache;
     ShmemBackendArray = param->ShmemBackendArray;

-    LWLockArray = param->LWLockArray;
+    FlexLockArray = param->FlexLockArray;
     ProcStructLock = param->ProcStructLock;
     ProcGlobal = param->ProcGlobal;
     AuxiliaryProcs = param->AuxiliaryProcs;
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index 157728e20e..587443d3a7 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -167,9 +167,9 @@ WalWriterMain(void)
         /*
          * These operations are really just a minimal subset of
          * AbortTransaction().  We don't have very many resources to worry
-         * about in walwriter, but we do have LWLocks, and perhaps buffers?
+         * about in walwriter, but we do have flex locks, and perhaps buffers?
          */
-        LWLockReleaseAll();
+        FlexLockReleaseAll();
         AbortBufferIO();
         UnlockBuffers();
         /* buffer pins are released here: */
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index e59af33e72..73b4cfb8e9 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -141,7 +141,7 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
     {
         BufferTag   newTag;     /* identity of requested block */
         uint32      newHash;    /* hash value for newTag */
-        LWLockId    newPartitionLock;   /* buffer partition lock for it */
+        FlexLockId  newPartitionLock;   /* buffer partition lock for it */
         int         buf_id;

         /* create a tag so we can lookup the buffer */
@@ -512,10 +512,10 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 {
     BufferTag   newTag;         /* identity of requested block */
     uint32      newHash;        /* hash value for newTag */
-    LWLockId    newPartitionLock;   /* buffer partition lock for it */
+    FlexLockId  newPartitionLock;   /* buffer partition lock for it */
     BufferTag   oldTag;         /* previous identity of selected buffer */
     uint32      oldHash;        /* hash value for oldTag */
-    LWLockId    oldPartitionLock;   /* buffer partition lock for it */
+    FlexLockId  oldPartitionLock;   /* buffer partition lock for it */
     BufFlags    oldFlags;
     int         buf_id;
     volatile BufferDesc *buf;
@@ -855,7 +855,7 @@ InvalidateBuffer(volatile BufferDesc *buf)
 {
     BufferTag   oldTag;
     uint32      oldHash;        /* hash value for oldTag */
-    LWLockId    oldPartitionLock;   /* buffer partition lock for it */
+    FlexLockId  oldPartitionLock;   /* buffer partition lock for it */
     BufFlags    oldFlags;

     /* Save the original buffer tag before dropping the spinlock */
@@ -965,7 +965,7 @@ MarkBufferDirty(Buffer buffer)
     Assert(PrivateRefCount[buffer - 1] > 0);

     /* unfortunately we can't check if the lock is held exclusively */
-    Assert(LWLockHeldByMe(bufHdr->content_lock));
+    Assert(FlexLockHeldByMe(bufHdr->content_lock));

     LockBufHdr(bufHdr);

@@ -1134,8 +1134,8 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner)
     if (PrivateRefCount[b] == 0)
     {
         /* I'd better not still hold any locks on the buffer */
-        Assert(!LWLockHeldByMe(buf->content_lock));
-        Assert(!LWLockHeldByMe(buf->io_in_progress_lock));
+        Assert(!FlexLockHeldByMe(buf->content_lock));
+        Assert(!FlexLockHeldByMe(buf->io_in_progress_lock));

         LockBufHdr(buf);

@@ -2310,7 +2310,7 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
     Assert(PrivateRefCount[buffer - 1] > 0);

     /* here, either share or exclusive lock is OK */
-    Assert(LWLockHeldByMe(bufHdr->content_lock));
+    Assert(FlexLockHeldByMe(bufHdr->content_lock));

     /*
      * This routine might get called many times on the same page, if we are
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 56c0bd8d49..02ee8d8962 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -113,7 +113,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
         size = add_size(size, SUBTRANSShmemSize());
         size = add_size(size, TwoPhaseShmemSize());
         size = add_size(size, MultiXactShmemSize());
-        size = add_size(size, LWLockShmemSize());
+        size = add_size(size, FlexLockShmemSize());
         size = add_size(size, ProcArrayShmemSize());
         size = add_size(size, BackendStatusShmemSize());
         size = add_size(size, SInvalShmemSize());
@@ -179,7 +179,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
      * needed for InitShmemIndex.
      */
     if (!IsUnderPostmaster)
-        CreateLWLocks();
+        CreateFlexLocks();

     /*
      * Set up shmem.c index hashtable
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 1a48485f97..39c5080bcf 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -52,6 +52,7 @@
 #include "access/twophase.h"
 #include "miscadmin.h"
 #include "storage/procarray.h"
+#include "storage/procarraylock.h"
 #include "storage/spin.h"
 #include "utils/builtins.h"
 #include "utils/snapmgr.h"
@@ -254,7 +255,7 @@ ProcArrayAdd(PGPROC *proc)
 {
     ProcArrayStruct *arrayP = procArray;

-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);

     if (arrayP->numProcs >= arrayP->maxProcs)
     {
@@ -263,7 +264,7 @@ ProcArrayAdd(PGPROC *proc)
          * fixed supply of PGPROC structs too, and so we should have failed
          * earlier.)
          */
-        LWLockRelease(ProcArrayLock);
+        ProcArrayLockRelease();
         ereport(FATAL,
                 (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
                  errmsg("sorry, too many clients already")));
@@ -272,7 +273,7 @@ ProcArrayAdd(PGPROC *proc)
     arrayP->procs[arrayP->numProcs] = proc;
     arrayP->numProcs++;

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }

 /*
@@ -297,7 +298,7 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
     DisplayXidCache();
 #endif

-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);

     if (TransactionIdIsValid(latestXid))
     {
@@ -321,13 +322,13 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
             arrayP->procs[index] = arrayP->procs[arrayP->numProcs - 1];
             arrayP->procs[arrayP->numProcs - 1] = NULL; /* for debugging */
             arrayP->numProcs--;
-            LWLockRelease(ProcArrayLock);
+            ProcArrayLockRelease();
             return;
         }
     }

     /* Ooops */
-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     elog(LOG, "failed to find proc %p in ProcArray", proc);
 }
@@ -351,54 +352,15 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
 {
     if (TransactionIdIsValid(latestXid))
     {
-        /*
-         * We must lock ProcArrayLock while clearing proc->xid, so that we do
-         * not exit the set of "running" transactions while someone else is
-         * taking a snapshot.  See discussion in
-         * src/backend/access/transam/README.
-         */
-        Assert(TransactionIdIsValid(proc->xid));
-
-        LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
-
-        proc->xid = InvalidTransactionId;
-        proc->lxid = InvalidLocalTransactionId;
-        proc->xmin = InvalidTransactionId;
-        /* must be cleared with xid/xmin: */
-        proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-        proc->inCommit = false; /* be sure this is cleared in abort */
-        proc->recoveryConflictPending = false;
-
-        /* Clear the subtransaction-XID cache too while holding the lock */
-        proc->subxids.nxids = 0;
-        proc->subxids.overflowed = false;
-
-        /* Also advance global latestCompletedXid while holding the lock */
-        if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
-                                  latestXid))
-            ShmemVariableCache->latestCompletedXid = latestXid;
-
-        LWLockRelease(ProcArrayLock);
+        Assert(proc == MyProc);
+        ProcArrayLockClearTransaction(latestXid);
     }
     else
-    {
-        /*
-         * If we have no XID, we don't need to lock, since we won't affect
-         * anyone else's calculation of a snapshot.  We might change their
-         * estimate of global xmin, but that's OK.
-         */
-        Assert(!TransactionIdIsValid(proc->xid));
-
-        proc->lxid = InvalidLocalTransactionId;
         proc->xmin = InvalidTransactionId;
-        /* must be cleared with xid/xmin: */
-        proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-        proc->inCommit = false; /* be sure this is cleared in abort */
-        proc->recoveryConflictPending = false;
-
-        Assert(proc->subxids.nxids == 0);
-        Assert(proc->subxids.overflowed == false);
-    }
+
+    proc->lxid = InvalidLocalTransactionId;
+    proc->inCommit = false;     /* be sure this is cleared in abort */
+    proc->recoveryConflictPending = false;
 }
@@ -528,7 +490,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
     /*
      * Nobody else is running yet, but take locks anyhow
      */
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);

     /*
      * KnownAssignedXids is sorted so we cannot just add the xids, we have to
@@ -635,7 +597,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
     Assert(TransactionIdIsNormal(ShmemVariableCache->latestCompletedXid));
     Assert(TransactionIdIsValid(ShmemVariableCache->nextXid));

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     KnownAssignedXidsDisplay(trace_recovery(DEBUG3));
     if (standbyState == STANDBY_SNAPSHOT_READY)
@@ -690,7 +652,7 @@ ProcArrayApplyXidAssignment(TransactionId topxid,
     /*
      * Uses same locking as transaction commit
      */
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);

     /*
      * Remove subxids from known-assigned-xacts.
@@ -703,7 +665,7 @@ ProcArrayApplyXidAssignment(TransactionId topxid,
     if (TransactionIdPrecedes(procArray->lastOverflowedXid, max_xid))
         procArray->lastOverflowedXid = max_xid;

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }

 /*
@@ -795,7 +757,7 @@ TransactionIdIsInProgress(TransactionId xid)
                  errmsg("out of memory")));
     }

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     /*
      * Now that we have the lock, we can check latestCompletedXid; if the
@@ -803,7 +765,7 @@ TransactionIdIsInProgress(TransactionId xid)
      */
     if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid, xid))
     {
-        LWLockRelease(ProcArrayLock);
+        ProcArrayLockRelease();
         xc_by_latest_xid_inc();
         return true;
     }
@@ -829,7 +791,7 @@ TransactionIdIsInProgress(TransactionId xid)
          */
         if (TransactionIdEquals(pxid, xid))
         {
-            LWLockRelease(ProcArrayLock);
+            ProcArrayLockRelease();
             xc_by_main_xid_inc();
             return true;
         }
@@ -851,7 +813,7 @@ TransactionIdIsInProgress(TransactionId xid)

             if (TransactionIdEquals(cxid, xid))
             {
-                LWLockRelease(ProcArrayLock);
+                ProcArrayLockRelease();
                 xc_by_child_xid_inc();
                 return true;
             }
@@ -879,7 +841,7 @@ TransactionIdIsInProgress(TransactionId xid)

         if (KnownAssignedXidExists(xid))
         {
-            LWLockRelease(ProcArrayLock);
+            ProcArrayLockRelease();
             xc_by_known_assigned_inc();
             return true;
         }
@@ -895,7 +857,7 @@ TransactionIdIsInProgress(TransactionId xid)
             nxids = KnownAssignedXidsGet(xids, xid);
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     /*
      * If none of the relevant caches overflowed, we know the Xid is not
@@ -961,7 +923,7 @@ TransactionIdIsActive(TransactionId xid)
     if (TransactionIdPrecedes(xid, RecentXmin))
         return false;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (i = 0; i < arrayP->numProcs; i++)
     {
@@ -983,7 +945,7 @@ TransactionIdIsActive(TransactionId xid)
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return result;
 }
@@ -1046,7 +1008,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
     /* Cannot look for individual databases during recovery */
     Assert(allDbs || !RecoveryInProgress());

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     /*
      * We initialize the MIN() calculation with latestCompletedXid + 1. This
@@ -1099,7 +1061,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
          */
         TransactionId kaxmin = KnownAssignedXidsGetOldestXmin();

-        LWLockRelease(ProcArrayLock);
+        ProcArrayLockRelease();

         if (TransactionIdIsNormal(kaxmin) &&
             TransactionIdPrecedes(kaxmin, result))
@@ -1110,7 +1072,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
         /*
          * No other information needed, so release the lock immediately.
         */
-        LWLockRelease(ProcArrayLock);
+        ProcArrayLockRelease();

         /*
          * Compute the cutoff XID by subtracting vacuum_defer_cleanup_age,
@@ -1239,7 +1201,7 @@ GetSnapshotData(Snapshot snapshot)
      * It is sufficient to get shared lock on ProcArrayLock, even if we are
      * going to set MyProc->xmin.
      */
-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     /* xmax is always latestCompletedXid + 1 */
     xmax = ShmemVariableCache->latestCompletedXid;
@@ -1375,7 +1337,7 @@ GetSnapshotData(Snapshot snapshot)
     if (!TransactionIdIsValid(MyProc->xmin))
         MyProc->xmin = TransactionXmin = xmin;

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     /*
      * Update globalxmin to include actual process xids.  This is a slightly
@@ -1432,7 +1394,7 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
         return false;

     /* Get lock so source xact can't end while we're doing this */
-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -1476,7 +1438,7 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
         break;
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return result;
 }
@@ -1550,7 +1512,7 @@ GetRunningTransactionData(void)
      * Ensure that no xids enter or leave the procarray while we obtain
      * snapshot.
      */
-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);
     LWLockAcquire(XidGenLock, LW_SHARED);

     latestCompletedXid = ShmemVariableCache->latestCompletedXid;
@@ -1611,7 +1573,7 @@ GetRunningTransactionData(void)
     CurrentRunningXacts->latestCompletedXid = latestCompletedXid;

     /* We don't release XidGenLock here, the caller is responsible for that */
-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     Assert(TransactionIdIsValid(CurrentRunningXacts->nextXid));
     Assert(TransactionIdIsValid(CurrentRunningXacts->oldestRunningXid));
@@ -1644,7 +1606,7 @@ GetOldestActiveTransactionId(void)

     Assert(!RecoveryInProgress());

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     oldestRunningXid = ShmemVariableCache->nextXid;

@@ -1672,7 +1634,7 @@ GetOldestActiveTransactionId(void)
         */
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return oldestRunningXid;
 }
@@ -1705,7 +1667,7 @@ GetTransactionsInCommit(TransactionId **xids_p)
     xids = (TransactionId *) palloc(arrayP->maxProcs * sizeof(TransactionId));
     nxids = 0;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -1718,7 +1680,7 @@ GetTransactionsInCommit(TransactionId **xids_p)
             xids[nxids++] = pxid;
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     *xids_p = xids;
     return nxids;
@@ -1740,7 +1702,7 @@ HaveTransactionsInCommit(TransactionId *xids, int nxids)
     ProcArrayStruct *arrayP = procArray;
     int         index;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -1766,7 +1728,7 @@ HaveTransactionsInCommit(TransactionId *xids, int nxids)
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return result;
 }
@@ -1788,7 +1750,7 @@ BackendPidGetProc(int pid)
     if (pid == 0)               /* never match dummy PGPROCs */
         return NULL;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -1801,7 +1763,7 @@ BackendPidGetProc(int pid)
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return result;
 }
@@ -1829,7 +1791,7 @@ BackendXidGetPid(TransactionId xid)
     if (xid == InvalidTransactionId)    /* never match invalid xid */
         return 0;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -1842,7 +1804,7 @@ BackendXidGetPid(TransactionId xid)
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return result;
 }
@@ -1897,7 +1859,7 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
     vxids = (VirtualTransactionId *)
         palloc(sizeof(VirtualTransactionId) * arrayP->maxProcs);

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -1933,7 +1895,7 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     *nvxids = count;
     return vxids;
@@ -1992,7 +1954,7 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
                  errmsg("out of memory")));
     }

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -2025,7 +1987,7 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     /* add the terminator */
     vxids[count].backendId = InvalidBackendId;
@@ -2046,7 +2008,7 @@ CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode)
     int         index;
     pid_t       pid = 0;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -2072,7 +2034,7 @@ CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode)
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return pid;
 }
@@ -2146,7 +2108,7 @@ CountDBBackends(Oid databaseid)
     int         count = 0;
     int         index;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -2159,7 +2121,7 @@ CountDBBackends(Oid databaseid)
             count++;
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return count;
 }
@@ -2175,7 +2137,7 @@ CancelDBBackends(Oid databaseid, ProcSignalReason sigmode, bool conflictPending)
     pid_t       pid = 0;

     /* tell all backends to die */
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -2200,7 +2162,7 @@ CancelDBBackends(Oid databaseid, ProcSignalReason sigmode, bool conflictPending)
         }
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }

 /*
@@ -2213,7 +2175,7 @@ CountUserBackends(Oid roleid)
     int         count = 0;
     int         index;

-    LWLockAcquire(ProcArrayLock, LW_SHARED);
+    ProcArrayLockAcquire(PAL_SHARED);

     for (index = 0; index < arrayP->numProcs; index++)
     {
@@ -2225,7 +2187,7 @@ CountUserBackends(Oid roleid)
             count++;
     }

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();

     return count;
 }
@@ -2273,7 +2235,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)

         *nbackends = *nprepared = 0;

-        LWLockAcquire(ProcArrayLock, LW_SHARED);
+        ProcArrayLockAcquire(PAL_SHARED);

         for (index = 0; index < arrayP->numProcs; index++)
         {
@@ -2297,7 +2259,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
             }
         }

-        LWLockRelease(ProcArrayLock);
+        ProcArrayLockRelease();

         if (!found)
             return false;       /* no conflicting backends, so done */
@@ -2350,7 +2312,7 @@ XidCacheRemoveRunningXids(TransactionId xid,
      * to abort subtransactions, but pending closer analysis we'd best be
      * conservative.
      */
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);

     /*
      * Under normal circumstances xid and xids[] will be in increasing order,
@@ -2398,7 +2360,7 @@ XidCacheRemoveRunningXids(TransactionId xid,
                               latestXid))
         ShmemVariableCache->latestCompletedXid = latestXid;

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }

 #ifdef XIDCACHE_DEBUG
@@ -2565,7 +2527,7 @@ ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids,
     /*
      * Uses same locking as transaction commit
      */
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);

     KnownAssignedXidsRemoveTree(xid, nsubxids, subxids);

@@ -2574,7 +2536,7 @@ ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids,
                               max_xid))
         ShmemVariableCache->latestCompletedXid = max_xid;

-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }

 /*
@@ -2584,9 +2546,9 @@ ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids,
 void
 ExpireAllKnownAssignedTransactionIds(void)
 {
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);
     KnownAssignedXidsRemovePreceding(InvalidTransactionId);
-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }

 /*
@@ -2596,9 +2558,9 @@ ExpireAllKnownAssignedTransactionIds(void)
 void
 ExpireOldKnownAssignedTransactionIds(TransactionId xid)
 {
-    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    ProcArrayLockAcquire(PAL_EXCLUSIVE);
     KnownAssignedXidsRemovePreceding(xid);
-    LWLockRelease(ProcArrayLock);
+    ProcArrayLockRelease();
 }
@@ -2820,7 +2782,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
     {
         /* must hold lock to compress */
         if (!exclusive_lock)
-            LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+            ProcArrayLockAcquire(PAL_EXCLUSIVE);

         KnownAssignedXidsCompress(true);
@@ -2828,7 +2790,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
         /* note: we no longer care about the tail pointer */

         if (!exclusive_lock)
-            LWLockRelease(ProcArrayLock);
+            ProcArrayLockRelease();

         /*
          * If it still won't fit then we're out of memory
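The most interesting procarray.c change is ProcArrayEndTransaction(): the whole clear-my-xid critical section is pushed down into ProcArrayLockClearTransaction(), so the specialized lock implementation is free to handle it more cleverly (for instance by batching concurrent callers). procarraylock.c itself is not in this excerpt; the sketch below reconstructs the minimum the function must do from the inlined code the hunk above removes, and is not the real implementation.

/*
 * Sketch only: the work ProcArrayLockClearTransaction() has to perform,
 * reconstructed from the code removed from ProcArrayEndTransaction().
 */
void
ProcArrayLockClearTransactionSketch(TransactionId latestXid)
{
    PGPROC     *proc = MyProc;

    ProcArrayLockAcquire(PAL_EXCLUSIVE);

    proc->xid = InvalidTransactionId;
    proc->lxid = InvalidLocalTransactionId;
    proc->xmin = InvalidTransactionId;
    /* must be cleared with xid/xmin: */
    proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
    proc->inCommit = false;
    proc->recoveryConflictPending = false;

    /* Clear the subtransaction-XID cache too while holding the lock */
    proc->subxids.nxids = 0;
    proc->subxids.overflowed = false;

    /* Also advance global latestCompletedXid while holding the lock */
    if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
                              latestXid))
        ShmemVariableCache->latestCompletedXid = latestXid;

    ProcArrayLockRelease();
}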
diff --git a/src/backend/storage/lmgr/Makefile b/src/backend/storage/lmgr/Makefile
index e12a8549f7..27eaa97020 100644
--- a/src/backend/storage/lmgr/Makefile
+++ b/src/backend/storage/lmgr/Makefile
@@ -12,7 +12,8 @@ subdir = src/backend/storage/lmgr
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global

-OBJS = lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o predicate.o
+OBJS = flexlock.o lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o \
+	procarraylock.o predicate.o

 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 905502f145..adc5fd9dab 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -591,7 +591,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
     bool        found;
     ResourceOwner owner;
     uint32      hashcode;
-    LWLockId    partitionLock;
+    FlexLockId  partitionLock;
     int         status;
     bool        log_lock = false;

@@ -1546,7 +1546,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
     LOCALLOCK  *locallock;
     LOCK       *lock;
     PROCLOCK   *proclock;
-    LWLockId    partitionLock;
+    FlexLockId  partitionLock;
     bool        wakeupNeeded;

     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
@@ -1912,7 +1912,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
      */
     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
     {
-        LWLockId    partitionLock = FirstLockMgrLock + partition;
+        FlexLockId  partitionLock = FirstLockMgrLock + partition;
         SHM_QUEUE  *procLocks = &(MyProc->myProcLocks[partition]);

         proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
@@ -2197,7 +2197,7 @@ static bool
 FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
                               uint32 hashcode)
 {
-    LWLockId    partitionLock = LockHashPartitionLock(hashcode);
+    FlexLockId  partitionLock = LockHashPartitionLock(hashcode);
     Oid         relid = locktag->locktag_field2;
     uint32      i;

@@ -2281,7 +2281,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
     LOCKTAG    *locktag = &locallock->tag.lock;
     PROCLOCK   *proclock = NULL;
-    LWLockId    partitionLock = LockHashPartitionLock(locallock->hashcode);
+    FlexLockId  partitionLock = LockHashPartitionLock(locallock->hashcode);
     Oid         relid = locktag->locktag_field2;
     uint32      f;

@@ -2382,7 +2382,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
     SHM_QUEUE  *procLocks;
     PROCLOCK   *proclock;
     uint32      hashcode;
-    LWLockId    partitionLock;
+    FlexLockId  partitionLock;
     int         count = 0;
     int         fast_count = 0;

@@ -2593,7 +2593,7 @@ LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
     PROCLOCKTAG proclocktag;
     uint32      hashcode;
     uint32      proclock_hashcode;
-    LWLockId    partitionLock;
+    FlexLockId  partitionLock;
     bool        wakeupNeeded;

     hashcode = LockTagHashCode(locktag);
@@ -2827,7 +2827,7 @@ PostPrepare_Locks(TransactionId xid)
      */
     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
     {
-        LWLockId    partitionLock = FirstLockMgrLock + partition;
+        FlexLockId  partitionLock = FirstLockMgrLock + partition;
         SHM_QUEUE  *procLocks = &(MyProc->myProcLocks[partition]);

         proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
@@ -3342,7 +3342,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
     uint32      hashcode;
     uint32      proclock_hashcode;
     int         partition;
-    LWLockId    partitionLock;
+    FlexLockId  partitionLock;
     LockMethod  lockMethodTable;

     Assert(len == sizeof(TwoPhaseLockRecord));
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 079eb29163..e3cebb2aeb 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -21,74 +21,23 @@
  */
 #include "postgres.h"

-#include "access/clog.h"
-#include "access/multixact.h"
-#include "access/subtrans.h"
-#include "commands/async.h"
 #include "miscadmin.h"
 #include "pg_trace.h"
+#include "storage/flexlock_internals.h"
 #include "storage/ipc.h"
-#include "storage/predicate.h"
 #include "storage/proc.h"
 #include "storage/spin.h"

-
-/* We use the ShmemLock spinlock to protect LWLockAssign */
-extern slock_t *ShmemLock;
-
-
 typedef struct LWLock
 {
-    slock_t     mutex;          /* Protects LWLock and queue of PGPROCs */
-    bool        releaseOK;      /* T if ok to release waiters */
+    FlexLock    flex;           /* common FlexLock infrastructure */
     char        exclusive;      /* # of exclusive holders (0 or 1) */
     int         shared;         /* # of shared holders (0..MaxBackends) */
-    PGPROC     *head;           /* head of list of waiting PGPROCs */
-    PGPROC     *tail;           /* tail of list of waiting PGPROCs */
-    /* tail is undefined when head is NULL */
 } LWLock;

-/*
- * All the LWLock structs are allocated as an array in shared memory.
- * (LWLockIds are indexes into the array.)  We force the array stride to
- * be a power of 2, which saves a few cycles in indexing, but more
- * importantly also ensures that individual LWLocks don't cross cache line
- * boundaries.  This reduces cache contention problems, especially on AMD
- * Opterons.  (Of course, we have to also ensure that the array start
- * address is suitably aligned.)
- *
- * LWLock is between 16 and 32 bytes on all known platforms, so these two
- * cases are sufficient.
- */
-#define LWLOCK_PADDED_SIZE  (sizeof(LWLock) <= 16 ? 16 : 32)
-
-typedef union LWLockPadded
-{
-    LWLock      lock;
-    char        pad[LWLOCK_PADDED_SIZE];
-} LWLockPadded;
-
-/*
- * This points to the array of LWLocks in shared memory.  Backends inherit
- * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
- * where we have special measures to pass it down).
- */
-NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
-
-
-/*
- * We use this structure to keep track of locked LWLocks for release
- * during error recovery.  The maximum size could be determined at runtime
- * if necessary, but it seems unlikely that more than a few locks could
- * ever be held simultaneously.
- */
-#define MAX_SIMUL_LWLOCKS   100
-
-static int  num_held_lwlocks = 0;
-static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];
-
-static int  lock_addin_request = 0;
-static bool lock_addin_request_allowed = true;
+#define LWLockPointer(lockid) \
+	(AssertMacro(FlexLockArray[lockid].flex.locktype == FLEXLOCK_TYPE_LWLOCK), \
+	 (volatile LWLock *) &FlexLockArray[lockid])

 #ifdef LWLOCK_STATS
 static int  counts_for_pid = 0;
@@ -98,27 +47,17 @@ static int *block_counts;
 #endif

 #ifdef LOCK_DEBUG
-bool        Trace_lwlocks = false;
-
 inline static void
-PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
+PRINT_LWDEBUG(const char *where, FlexLockId lockid, const volatile LWLock *lock)
 {
-    if (Trace_lwlocks)
+    if (Trace_flexlocks)
         elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
              where, (int) lockid,
-             (int) lock->exclusive, lock->shared, lock->head,
-             (int) lock->releaseOK);
-}
-
-inline static void
-LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
-{
-    if (Trace_lwlocks)
-        elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
+             (int) lock->exclusive, lock->shared, lock->flex.head,
+             (int) lock->flex.releaseOK);
 }
 #else                           /* not LOCK_DEBUG */
 #define PRINT_LWDEBUG(a,b,c)
-#define LOG_LWDEBUG(a,b,c)
 #endif   /* LOCK_DEBUG */

 #ifdef LWLOCK_STATS
@@ -127,8 +66,8 @@ static void
 print_lwlock_stats(int code, Datum arg)
 {
     int         i;
-    int        *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-    int         numLocks = LWLockCounter[1];
+    int        *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+    int         numLocks = FlexLockCounter[1];

     /* Grab an LWLock to keep different backends from mixing reports */
     LWLockAcquire(0, LW_EXCLUSIVE);
@@ -145,173 +84,15 @@ print_lwlock_stats(int code, Datum arg)
 }
 #endif   /* LWLOCK_STATS */

-
-/*
- * Compute number of LWLocks to allocate.
- */
-int
-NumLWLocks(void)
-{
-    int         numLocks;
-
-    /*
-     * Possibly this logic should be spread out among the affected modules,
-     * the same way that shmem space estimation is done.  But for now, there
-     * are few enough users of LWLocks that we can get away with just keeping
-     * the knowledge here.
-     */
-
-    /* Predefined LWLocks */
-    numLocks = (int) NumFixedLWLocks;
-
-    /* bufmgr.c needs two for each shared buffer */
-    numLocks += 2 * NBuffers;
-
-    /* proc.c needs one for each backend or auxiliary process */
-    numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
-
-    /* clog.c needs one per CLOG buffer */
-    numLocks += NUM_CLOG_BUFFERS;
-
-    /* subtrans.c needs one per SubTrans buffer */
-    numLocks += NUM_SUBTRANS_BUFFERS;
-
-    /* multixact.c needs two SLRU areas */
-    numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
-
-    /* async.c needs one per Async buffer */
-    numLocks += NUM_ASYNC_BUFFERS;
-
-    /* predicate.c needs one per old serializable xid buffer */
-    numLocks += NUM_OLDSERXID_BUFFERS;
-
-    /*
-     * Add any requested by loadable modules; for backwards-compatibility
-     * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
-     * there are no explicit requests.
-     */
-    lock_addin_request_allowed = false;
-    numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
-
-    return numLocks;
-}
-
-
-/*
- * RequestAddinLWLocks
- *      Request that extra LWLocks be allocated for use by
- *      a loadable module.
- *
- * This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries.  Once
- * shared memory has been allocated, calls will be ignored.  (We could
- * raise an error, but it seems better to make it a no-op, so that
- * libraries containing such calls can be reloaded if needed.)
- */
-void
-RequestAddinLWLocks(int n)
-{
-    if (IsUnderPostmaster || !lock_addin_request_allowed)
-        return;                 /* too late */
-    lock_addin_request += n;
-}
-
-
-/*
- * Compute shmem space needed for LWLocks.
- */
-Size
-LWLockShmemSize(void)
-{
-    Size        size;
-    int         numLocks = NumLWLocks();
-
-    /* Space for the LWLock array. */
-    size = mul_size(numLocks, sizeof(LWLockPadded));
-
-    /* Space for dynamic allocation counter, plus room for alignment. */
-    size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
-
-    return size;
-}
-
-
-/*
- * Allocate shmem space for LWLocks and initialize the locks.
- */
-void
-CreateLWLocks(void)
-{
-    int         numLocks = NumLWLocks();
-    Size        spaceLocks = LWLockShmemSize();
-    LWLockPadded *lock;
-    int        *LWLockCounter;
-    char       *ptr;
-    int         id;
-
-    /* Allocate space */
-    ptr = (char *) ShmemAlloc(spaceLocks);
-
-    /* Leave room for dynamic allocation counter */
-    ptr += 2 * sizeof(int);
-
-    /* Ensure desired alignment of LWLock array */
-    ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
-
-    LWLockArray = (LWLockPadded *) ptr;
-
-    /*
-     * Initialize all LWLocks to "unlocked" state
-     */
-    for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
-    {
-        SpinLockInit(&lock->lock.mutex);
-        lock->lock.releaseOK = true;
-        lock->lock.exclusive = 0;
-        lock->lock.shared = 0;
-        lock->lock.head = NULL;
-        lock->lock.tail = NULL;
-    }
-
-    /*
-     * Initialize the dynamic-allocation counter, which is stored just before
-     * the first LWLock.
-     */
-    LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-    LWLockCounter[0] = (int) NumFixedLWLocks;
-    LWLockCounter[1] = numLocks;
-}
-
-
 /*
- * LWLockAssign - assign a dynamically-allocated LWLock number
- *
- * We interlock this using the same spinlock that is used to protect
- * ShmemAlloc().  Interlocking is not really necessary during postmaster
- * startup, but it is needed if any user-defined code tries to allocate
- * LWLocks after startup.
+ * LWLockAssign - initialize a new lwlock and return its ID
 */
-LWLockId
+FlexLockId
 LWLockAssign(void)
 {
-    LWLockId    result;
-
-    /* use volatile pointer to prevent code rearrangement */
-    volatile int *LWLockCounter;
-
-    LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-    SpinLockAcquire(ShmemLock);
-    if (LWLockCounter[0] >= LWLockCounter[1])
-    {
-        SpinLockRelease(ShmemLock);
-        elog(ERROR, "no more LWLockIds available");
-    }
-    result = (LWLockId) (LWLockCounter[0]++);
-    SpinLockRelease(ShmemLock);
-    return result;
+    return FlexLockAssign(FLEXLOCK_TYPE_LWLOCK);
 }

-
 /*
  * LWLockAcquire - acquire a lightweight lock in the specified mode
  *
@@ -320,9 +101,9 @@ LWLockAssign(void)
  * Side effect: cancel/die interrupts are held off until lock release.
  */
 void
-LWLockAcquire(LWLockId lockid, LWLockMode mode)
+LWLockAcquire(FlexLockId lockid, LWLockMode mode)
 {
-    volatile LWLock *lock = &(LWLockArray[lockid].lock);
+    volatile LWLock *lock = LWLockPointer(lockid);
     PGPROC     *proc = MyProc;
     bool        retry = false;
     int         extraWaits = 0;
@@ -333,8 +114,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
     /* Set up local count state first time through in a given process */
     if (counts_for_pid != MyProcPid)
     {
-        int        *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-        int         numLocks = LWLockCounter[1];
+        int        *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+        int         numLocks = FlexLockCounter[1];

         sh_acquire_counts = calloc(numLocks, sizeof(int));
         ex_acquire_counts = calloc(numLocks, sizeof(int));
@@ -356,10 +137,6 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
      */
     Assert(!(proc == NULL && IsUnderPostmaster));

-    /* Ensure we will have room to remember the lock */
-    if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
-        elog(ERROR, "too many LWLocks taken");
-
     /*
      * Lock out cancel/die interrupts until we exit the code section protected
      * by the LWLock.  This ensures that interrupts will not interfere with
@@ -388,11 +165,11 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
         bool        mustwait;

         /* Acquire mutex.  Time spent holding mutex should be short! */
-        SpinLockAcquire(&lock->mutex);
+        SpinLockAcquire(&lock->flex.mutex);

         /* If retrying, allow LWLockRelease to release waiters again */
         if (retry)
-            lock->releaseOK = true;
+            lock->flex.releaseOK = true;

         /* If I can get the lock, do so quickly. */
         if (mode == LW_EXCLUSIVE)
@@ -419,72 +196,30 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
         if (!mustwait)
             break;              /* got the lock */

-        /*
-         * Add myself to wait queue.
-         *
-         * If we don't have a PGPROC structure, there's no way to wait. This
-         * should never occur, since MyProc should only be null during shared
-         * memory initialization.
-         */
-        if (proc == NULL)
-            elog(PANIC, "cannot wait without a PGPROC structure");
-
-        proc->lwWaiting = true;
-        proc->lwExclusive = (mode == LW_EXCLUSIVE);
-        proc->lwWaitLink = NULL;
-        if (lock->head == NULL)
-            lock->head = proc;
-        else
-            lock->tail->lwWaitLink = proc;
-        lock->tail = proc;
+        /* Add myself to wait queue. */
+        FlexLockJoinWaitQueue(lock, (int) mode);

         /* Can release the mutex now */
-        SpinLockRelease(&lock->mutex);
-
-        /*
-         * Wait until awakened.
-         *
-         * Since we share the process wait semaphore with the regular lock
-         * manager and ProcWaitForSignal, and we may need to acquire an LWLock
-         * while one of those is pending, it is possible that we get awakened
-         * for a reason other than being signaled by LWLockRelease. If so,
-         * loop back and wait again.  Once we've gotten the LWLock,
-         * re-increment the sema by the number of additional signals received,
-         * so that the lock manager or signal manager will see the received
-         * signal when it next waits.
-         */
-        LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
+        SpinLockRelease(&lock->flex.mutex);
+
+        /* Wait until awakened. */
+        extraWaits += FlexLockWait(lockid, mode);

 #ifdef LWLOCK_STATS
         block_counts[lockid]++;
 #endif

-        TRACE_POSTGRESQL_LWLOCK_WAIT_START(lockid, mode);
-
-        for (;;)
-        {
-            /* "false" means cannot accept cancel/die interrupt here. */
-            PGSemaphoreLock(&proc->sem, false);
-            if (!proc->lwWaiting)
-                break;
-            extraWaits++;
-        }
-
-        TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(lockid, mode);
-
-        LOG_LWDEBUG("LWLockAcquire", lockid, "awakened");
-
         /* Now loop back and try to acquire lock again. */
         retry = true;
     }

     /* We are done updating shared state of the lock itself. */
-    SpinLockRelease(&lock->mutex);
+    SpinLockRelease(&lock->flex.mutex);

-    TRACE_POSTGRESQL_LWLOCK_ACQUIRE(lockid, mode);
+    TRACE_POSTGRESQL_FLEXLOCK_ACQUIRE(lockid, mode);

     /* Add lock to list of locks held by this backend */
-    held_lwlocks[num_held_lwlocks++] = lockid;
+    FlexLockRemember(lockid);

     /*
      * Fix the process wait semaphore's count for any absorbed wakeups.
@@ -501,17 +236,13 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
  * If successful, cancel/die interrupts are held off until lock release.
  */
 bool
-LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
+LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode)
 {
-    volatile LWLock *lock = &(LWLockArray[lockid].lock);
+    volatile LWLock *lock = LWLockPointer(lockid);
     bool        mustwait;

     PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);

-    /* Ensure we will have room to remember the lock */
-    if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
-        elog(ERROR, "too many LWLocks taken");
-
     /*
      * Lock out cancel/die interrupts until we exit the code section protected
      * by the LWLock.  This ensures that interrupts will not interfere with
@@ -520,7 +251,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
     HOLD_INTERRUPTS();

     /* Acquire mutex.  Time spent holding mutex should be short! */
-    SpinLockAcquire(&lock->mutex);
+    SpinLockAcquire(&lock->flex.mutex);

     /* If I can get the lock, do so quickly. */
     if (mode == LW_EXCLUSIVE)
@@ -545,20 +276,20 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
     }

     /* We are done updating shared state of the lock itself. */
-    SpinLockRelease(&lock->mutex);
+    SpinLockRelease(&lock->flex.mutex);

     if (mustwait)
     {
         /* Failed to get lock, so release interrupt holdoff */
         RESUME_INTERRUPTS();
-        LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
-        TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(lockid, mode);
+        FlexLockDebug("LWLockConditionalAcquire", lockid, "failed");
+        TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE_FAIL(lockid, mode);
     }
     else
     {
         /* Add lock to list of locks held by this backend */
-        held_lwlocks[num_held_lwlocks++] = lockid;
-        TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(lockid, mode);
+        FlexLockRemember(lockid);
+        TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE(lockid, mode);
     }

     return !mustwait;
@@ -568,32 +299,18 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
  * LWLockRelease - release a previously acquired lock
  */
 void
-LWLockRelease(LWLockId lockid)
+LWLockRelease(FlexLockId lockid)
 {
-    volatile LWLock *lock = &(LWLockArray[lockid].lock);
+    volatile LWLock *lock = LWLockPointer(lockid);
     PGPROC     *head;
     PGPROC     *proc;
-    int         i;

     PRINT_LWDEBUG("LWLockRelease", lockid, lock);

-    /*
-     * Remove lock from list of locks held.  Usually, but not always, it will
-     * be the latest-acquired lock; so search array backwards.
-     */
-    for (i = num_held_lwlocks; --i >= 0;)
-    {
-        if (lockid == held_lwlocks[i])
-            break;
-    }
-    if (i < 0)
-        elog(ERROR, "lock %d is not held", (int) lockid);
-    num_held_lwlocks--;
-    for (; i < num_held_lwlocks; i++)
-        held_lwlocks[i] = held_lwlocks[i + 1];
+    FlexLockForget(lockid);

     /* Acquire mutex.  Time spent holding mutex should be short! */
-    SpinLockAcquire(&lock->mutex);
+    SpinLockAcquire(&lock->flex.mutex);

     /* Release my hold on lock */
     if (lock->exclusive > 0)
@@ -610,10 +327,10 @@ LWLockRelease(LWLockId lockid)
      * if someone has already awakened waiters that haven't yet acquired the
      * lock.
      */
-    head = lock->head;
+    head = lock->flex.head;
     if (head != NULL)
     {
-        if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
+        if (lock->exclusive == 0 && lock->shared == 0 && lock->flex.releaseOK)
         {
             /*
              * Remove the to-be-awakened PGPROCs from the queue.  If the front
@@ -621,17 +338,17 @@ LWLockRelease(LWLockId lockid)
              * as many waiters as want shared access.
              */
             proc = head;
-            if (!proc->lwExclusive)
+            if (proc->flWaitMode != LW_EXCLUSIVE)
             {
-                while (proc->lwWaitLink != NULL &&
-                       !proc->lwWaitLink->lwExclusive)
-                    proc = proc->lwWaitLink;
+                while (proc->flWaitLink != NULL &&
+                       proc->flWaitLink->flWaitMode != LW_EXCLUSIVE)
+                    proc = proc->flWaitLink;
             }
             /* proc is now the last PGPROC to be released */
-            lock->head = proc->lwWaitLink;
-            proc->lwWaitLink = NULL;
+            lock->flex.head = proc->flWaitLink;
+            proc->flWaitLink = NULL;
             /* prevent additional wakeups until retryer gets to run */
-            lock->releaseOK = false;
+            lock->flex.releaseOK = false;
         }
         else
         {
@@ -641,20 +358,20 @@ LWLockRelease(LWLockId lockid)
     }

     /* We are done updating shared state of the lock itself. */
-    SpinLockRelease(&lock->mutex);
+    SpinLockRelease(&lock->flex.mutex);

-    TRACE_POSTGRESQL_LWLOCK_RELEASE(lockid);
+    TRACE_POSTGRESQL_FLEXLOCK_RELEASE(lockid);

     /*
      * Awaken any waiters I removed from the queue.
      */
     while (head != NULL)
     {
-        LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
+        FlexLockDebug("LWLockRelease", lockid, "release waiter");
         proc = head;
-        head = proc->lwWaitLink;
-        proc->lwWaitLink = NULL;
-        proc->lwWaiting = false;
+        head = proc->flWaitLink;
+        proc->flWaitLink = NULL;
+        proc->flWaitResult = 1; /* any non-zero value will do */
         PGSemaphoreUnlock(&proc->sem);
     }

@@ -663,44 +380,3 @@ LWLockRelease(LWLockId lockid)
      */
     RESUME_INTERRUPTS();
 }
-
-
-/*
- * LWLockReleaseAll - release all currently-held locks
- *
- * Used to clean up after ereport(ERROR). An important difference between this
- * function and retail LWLockRelease calls is that InterruptHoldoffCount is
- * unchanged by this operation. This is necessary since InterruptHoldoffCount
- * has been set to an appropriate level earlier in error recovery. We could
- * decrement it below zero if we allow it to drop for each released lock!
- */
-void
-LWLockReleaseAll(void)
-{
-    while (num_held_lwlocks > 0)
-    {
-        HOLD_INTERRUPTS();      /* match the upcoming RESUME_INTERRUPTS */
-
-        LWLockRelease(held_lwlocks[num_held_lwlocks - 1]);
-    }
-}
-
-
-/*
- * LWLockHeldByMe - test whether my process currently holds a lock
- *
- * This is meant as debug support only. We do not distinguish whether the
- * lock is held shared or exclusive.
- */
-bool
-LWLockHeldByMe(LWLockId lockid)
-{
-    int         i;
-
-    for (i = 0; i < num_held_lwlocks; i++)
-    {
-        if (held_lwlocks[i] == lockid)
-            return true;
-    }
-    return false;
-}
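Everything deleted from lwlock.c above (the held_lwlocks[] array, LWLockReleaseAll, LWLockHeldByMe, the shmem sizing and creation code) reappears in this patch as the generic FlexLockRemember/FlexLockForget/FlexLockReleaseAll/FlexLockHeldByMe and FlexLockShmemSize/CreateFlexLocks calls. flexlock.c is not included in this excerpt; the sketch below assumes it keeps essentially the old per-backend bookkeeping, mirroring the removed code, and is illustrative only.

/*
 * Sketch of the held-lock tracking presumably moved into flexlock.c;
 * mirrors the held_lwlocks[] code this patch removes from lwlock.c.
 */
#define MAX_SIMUL_FLEXLOCKS 100

static int  num_held_flexlocks = 0;
static FlexLockId held_flexlocks[MAX_SIMUL_FLEXLOCKS];

void
FlexLockRemember(FlexLockId lockid)
{
    if (num_held_flexlocks >= MAX_SIMUL_FLEXLOCKS)
        elog(ERROR, "too many FlexLocks taken");
    held_flexlocks[num_held_flexlocks++] = lockid;
}

void
FlexLockForget(FlexLockId lockid)
{
    int         i;

    /* usually, but not always, the latest-acquired lock; search backwards */
    for (i = num_held_flexlocks; --i >= 0;)
    {
        if (lockid == held_flexlocks[i])
            break;
    }
    if (i < 0)
        elog(ERROR, "lock %d is not held", (int) lockid);
    num_held_flexlocks--;
    for (; i < num_held_flexlocks; i++)
        held_flexlocks[i] = held_flexlocks[i + 1];
}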
*/ if (!SHMQueueEmpty(&target->predicateLocks)) @@ -2073,7 +2073,7 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag) if (TargetTagIsCoveredBy(oldtargettag, *newtargettag)) { uint32 oldtargettaghash; - LWLockId partitionLock; + FlexLockId partitionLock; PREDICATELOCK *rmpredlock; oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag); @@ -2285,7 +2285,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag, PREDICATELOCKTARGET *target; PREDICATELOCKTAG locktag; PREDICATELOCK *lock; - LWLockId partitionLock; + FlexLockId partitionLock; bool found; partitionLock = PredicateLockHashPartitionLock(targettaghash); @@ -2518,8 +2518,8 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash) PREDICATELOCK *nextpredlock; bool found; - Assert(LWLockHeldByMe(SerializablePredicateLockListLock)); - Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash))); + Assert(FlexLockHeldByMe(SerializablePredicateLockListLock)); + Assert(FlexLockHeldByMe(PredicateLockHashPartitionLock(targettaghash))); predlock = (PREDICATELOCK *) SHMQueueNext(&(target->predicateLocks), @@ -2586,14 +2586,14 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag, bool removeOld) { uint32 oldtargettaghash; - LWLockId oldpartitionLock; + FlexLockId oldpartitionLock; PREDICATELOCKTARGET *oldtarget; uint32 newtargettaghash; - LWLockId newpartitionLock; + FlexLockId newpartitionLock; bool found; bool outOfShmem = false; - Assert(LWLockHeldByMe(SerializablePredicateLockListLock)); + Assert(FlexLockHeldByMe(SerializablePredicateLockListLock)); oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag); newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag); @@ -3125,7 +3125,7 @@ SetNewSxactGlobalXmin(void) { SERIALIZABLEXACT *sxact; - Assert(LWLockHeldByMe(SerializableXactHashLock)); + Assert(FlexLockHeldByMe(SerializableXactHashLock)); PredXact->SxactGlobalXmin = InvalidTransactionId; PredXact->SxactGlobalXminCount = 0; @@ -3578,7 +3578,7 @@ ClearOldPredicateLocks(void) PREDICATELOCKTARGET *target; PREDICATELOCKTARGETTAG targettag; uint32 targettaghash; - LWLockId partitionLock; + FlexLockId partitionLock; tag = predlock->tag; target = tag.myTarget; @@ -3637,7 +3637,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, Assert(sxact != NULL); Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact)); - Assert(LWLockHeldByMe(SerializableFinishedListLock)); + Assert(FlexLockHeldByMe(SerializableFinishedListLock)); /* * First release all the predicate locks held by this xact (or transfer @@ -3656,7 +3656,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, PREDICATELOCKTARGET *target; PREDICATELOCKTARGETTAG targettag; uint32 targettaghash; - LWLockId partitionLock; + FlexLockId partitionLock; nextpredlock = (PREDICATELOCK *) SHMQueueNext(&(sxact->predicateLocks), @@ -4034,7 +4034,7 @@ static void CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag) { uint32 targettaghash; - LWLockId partitionLock; + FlexLockId partitionLock; PREDICATELOCKTARGET *target; PREDICATELOCK *predlock; PREDICATELOCK *mypredlock = NULL; @@ -4427,7 +4427,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, bool failure; RWConflict conflict; - Assert(LWLockHeldByMe(SerializableXactHashLock)); + Assert(FlexLockHeldByMe(SerializableXactHashLock)); failure = false; diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index eda3a98a85..510a4c22e0 100644 --- 
a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -45,6 +45,7 @@ #include "storage/pmsignal.h" #include "storage/proc.h" #include "storage/procarray.h" +#include "storage/procarraylock.h" #include "storage/procsignal.h" #include "storage/spin.h" #include "utils/timestamp.h" @@ -325,9 +326,9 @@ InitProcess(void) /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */ if (IsAutoVacuumWorkerProcess()) MyProc->vacuumFlags |= PROC_IS_AUTOVACUUM; - MyProc->lwWaiting = false; - MyProc->lwExclusive = false; - MyProc->lwWaitLink = NULL; + MyProc->flWaitResult = 0; + MyProc->flWaitMode = 0; + MyProc->flWaitLink = NULL; MyProc->waitLock = NULL; MyProc->waitProcLock = NULL; #ifdef USE_ASSERT_CHECKING @@ -479,9 +480,9 @@ InitAuxiliaryProcess(void) MyProc->roleId = InvalidOid; MyProc->inCommit = false; MyProc->vacuumFlags = 0; - MyProc->lwWaiting = false; - MyProc->lwExclusive = false; - MyProc->lwWaitLink = NULL; + MyProc->flWaitMode = 0; + MyProc->flWaitResult = 0; + MyProc->flWaitLink = NULL; MyProc->waitLock = NULL; MyProc->waitProcLock = NULL; #ifdef USE_ASSERT_CHECKING @@ -607,7 +608,7 @@ IsWaitingForLock(void) void LockWaitCancel(void) { - LWLockId partitionLock; + FlexLockId partitionLock; /* Nothing to do if we weren't waiting for a lock */ if (lockAwaited == NULL) @@ -718,11 +719,11 @@ ProcKill(int code, Datum arg) #endif /* - * Release any LW locks I am holding. There really shouldn't be any, but - * it's cheap to check again before we cut the knees off the LWLock + * Release any flex locks I am holding. There really shouldn't be any, but + * it's cheap to check again before we cut the knees off the flex lock * facility by releasing our PGPROC ... */ - LWLockReleaseAll(); + FlexLockReleaseAll(); /* Release ownership of the process's latch, too */ DisownLatch(&MyProc->procLatch); @@ -779,8 +780,8 @@ AuxiliaryProcKill(int code, Datum arg) Assert(MyProc == auxproc); - /* Release any LW locks I am holding (see notes above) */ - LWLockReleaseAll(); + /* Release any flex locks I am holding (see notes above) */ + FlexLockReleaseAll(); /* Release ownership of the process's latch, too */ DisownLatch(&MyProc->procLatch); @@ -865,7 +866,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) LOCK *lock = locallock->lock; PROCLOCK *proclock = locallock->proclock; uint32 hashcode = locallock->hashcode; - LWLockId partitionLock = LockHashPartitionLock(hashcode); + FlexLockId partitionLock = LockHashPartitionLock(hashcode); PROC_QUEUE *waitQueue = &(lock->waitProcs); LOCKMASK myHeldLocks = MyProc->heldLocks; bool early_deadlock = false; @@ -1046,7 +1047,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) { PGPROC *autovac = GetBlockingAutoVacuumPgproc(); - LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); + ProcArrayLockAcquire(PAL_EXCLUSIVE); /* * Only do it if the worker is not working to protect against Xid @@ -1062,7 +1063,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) pid); /* don't hold the lock across the kill() syscall */ - LWLockRelease(ProcArrayLock); + ProcArrayLockRelease(); /* send the autovacuum worker Back to Old Kent Road */ if (kill(pid, SIGINT) < 0) @@ -1074,7 +1075,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) } } else - LWLockRelease(ProcArrayLock); + ProcArrayLockRelease(); /* prevent signal from being resent more than once */ allow_autovacuum_cancel = false; diff --git a/src/backend/utils/misc/check_guc b/src/backend/utils/misc/check_guc index 293fb0363f..1a19e36826 100755 ---
a/src/backend/utils/misc/check_guc +++ b/src/backend/utils/misc/check_guc @@ -19,7 +19,7 @@ INTENTIONALLY_NOT_INCLUDED="autocommit debug_deadlocks \ is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \ pre_auth_delay role seed server_encoding server_version server_version_int \ -session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_lwlocks \ +session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_flexlocks \ trace_notify trace_userlocks transaction_isolation transaction_read_only \ zero_damaged_pages" diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index da7b6d4e90..52de233ccf 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -59,6 +59,7 @@ #include "replication/walreceiver.h" #include "replication/walsender.h" #include "storage/bufmgr.h" +#include "storage/flexlock_internals.h" #include "storage/standby.h" #include "storage/fd.h" #include "storage/predicate.h" @@ -1071,12 +1072,12 @@ static struct config_bool ConfigureNamesBool[] = NULL, NULL, NULL }, { - {"trace_lwlocks", PGC_SUSET, DEVELOPER_OPTIONS, + {"trace_flexlocks", PGC_SUSET, DEVELOPER_OPTIONS, gettext_noop("No description available."), NULL, GUC_NOT_IN_SAMPLE }, - &Trace_lwlocks, + &Trace_flexlocks, false, NULL, NULL, NULL }, diff --git a/src/backend/utils/probes.d b/src/backend/utils/probes.d index 71c5ab0bee..5b9cfe695a 100644 --- a/src/backend/utils/probes.d +++ b/src/backend/utils/probes.d @@ -15,8 +15,8 @@ * in probe definitions, as they cause compilation errors on Mac OS X 10.5. */ #define LocalTransactionId unsigned int -#define LWLockId int -#define LWLockMode int +#define FlexLockId int +#define FlexLockMode int #define LOCKMODE int #define BlockNumber unsigned int #define Oid unsigned int @@ -29,12 +29,12 @@ provider postgresql { probe transaction__commit(LocalTransactionId); probe transaction__abort(LocalTransactionId); - probe lwlock__acquire(LWLockId, LWLockMode); - probe lwlock__release(LWLockId); - probe lwlock__wait__start(LWLockId, LWLockMode); - probe lwlock__wait__done(LWLockId, LWLockMode); - probe lwlock__condacquire(LWLockId, LWLockMode); - probe lwlock__condacquire__fail(LWLockId, LWLockMode); + probe flexlock__acquire(FlexLockId, FlexLockMode); + probe flexlock__release(FlexLockId); + probe flexlock__wait__start(FlexLockId, FlexLockMode); + probe flexlock__wait__done(FlexLockId, FlexLockMode); + probe flexlock__condacquire(FlexLockId, FlexLockMode); + probe flexlock__condacquire__fail(FlexLockId, FlexLockMode); probe lock__wait__start(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE); probe lock__wait__done(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE); diff --git a/src/include/access/slru.h b/src/include/access/slru.h index e48743f55d..680a87f550 100644 --- a/src/include/access/slru.h +++ b/src/include/access/slru.h @@ -55,7 +55,7 @@ typedef enum */ typedef struct SlruSharedData { - LWLockId ControlLock; + FlexLockId ControlLock; /* Number of buffers managed by this SLRU structure */ int num_slots; @@ -69,7 +69,7 @@ typedef struct SlruSharedData bool *page_dirty; int *page_number; int *page_lru_count; - LWLockId *buffer_locks; + FlexLockId *buffer_locks; /* * Optional array of WAL flush LSNs associated with entries in the SLRU @@ -136,7 +136,7 @@ typedef SlruCtlData *SlruCtl; extern Size SimpleLruShmemSize(int nslots, int nlsns); extern void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, - LWLockId 
ctllock, const char *subdir); + FlexLockId ctllock, const char *subdir); extern int SimpleLruZeroPage(SlruCtl ctl, int pageno); extern int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok, TransactionId xid); diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h index 6c8e31269c..d3b74db5fa 100644 --- a/src/include/pg_config_manual.h +++ b/src/include/pg_config_manual.h @@ -49,9 +49,9 @@ #define SEQ_MINVALUE (-SEQ_MAXVALUE) /* - * Number of spare LWLocks to allocate for user-defined add-on code. + * Number of spare FlexLocks to allocate for user-defined add-on code. */ -#define NUM_USER_DEFINED_LWLOCKS 4 +#define NUM_USER_DEFINED_FLEXLOCKS 4 /* * Define this if you want to allow the lo_import and lo_export SQL diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h index b7d4ea53a4..ac7f66501f 100644 --- a/src/include/storage/buf_internals.h +++ b/src/include/storage/buf_internals.h @@ -103,7 +103,7 @@ typedef struct buftag #define BufTableHashPartition(hashcode) \ ((hashcode) % NUM_BUFFER_PARTITIONS) #define BufMappingPartitionLock(hashcode) \ - ((LWLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode))) + ((FlexLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode))) /* * BufferDesc -- shared descriptor/state data for a single shared buffer. @@ -143,8 +143,8 @@ typedef struct sbufdesc int buf_id; /* buffer's index number (from 0) */ int freeNext; /* link in freelist chain */ - LWLockId io_in_progress_lock; /* to wait for I/O to complete */ - LWLockId content_lock; /* to lock access to buffer contents */ + FlexLockId io_in_progress_lock; /* to wait for I/O to complete */ + FlexLockId content_lock; /* to lock access to buffer contents */ } BufferDesc; #define BufferDescriptorGetBuffer(bdesc) ((bdesc)->buf_id + 1) diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h index e106ad5401..ba87db2817 100644 --- a/src/include/storage/lock.h +++ b/src/include/storage/lock.h @@ -471,7 +471,7 @@ typedef enum #define LockHashPartition(hashcode) \ ((hashcode) % NUM_LOCK_PARTITIONS) #define LockHashPartitionLock(hashcode) \ - ((LWLockId) (FirstLockMgrLock + LockHashPartition(hashcode))) + ((FlexLockId) (FirstLockMgrLock + LockHashPartition(hashcode))) /* diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index 438a48d8dc..69c72f18c9 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -14,82 +14,7 @@ #ifndef LWLOCK_H #define LWLOCK_H -/* - * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS - * here, but we need them to set up enum LWLockId correctly, and having - * this file include lock.h or bufmgr.h would be backwards. - */ - -/* Number of partitions of the shared buffer mapping hashtable */ -#define NUM_BUFFER_PARTITIONS 16 - -/* Number of partitions the shared lock tables are divided into */ -#define LOG2_NUM_LOCK_PARTITIONS 4 -#define NUM_LOCK_PARTITIONS (1 << LOG2_NUM_LOCK_PARTITIONS) - -/* Number of partitions the shared predicate lock tables are divided into */ -#define LOG2_NUM_PREDICATELOCK_PARTITIONS 4 -#define NUM_PREDICATELOCK_PARTITIONS (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS) - -/* - * We have a number of predefined LWLocks, plus a bunch of LWLocks that are - * dynamically assigned (e.g., for shared buffers). The LWLock structures - * live in shared memory (since they contain shared data) and are identified - * by values of this enumerated type. 
We abuse the notion of an enum somewhat - * by allowing values not listed in the enum declaration to be assigned. - * The extra value MaxDynamicLWLock is there to keep the compiler from - * deciding that the enum can be represented as char or short ... - * - * If you remove a lock, please replace it with a placeholder. This retains - * the lock numbering, which is helpful for DTrace and other external - * debugging scripts. - */ -typedef enum LWLockId -{ - BufFreelistLock, - ShmemIndexLock, - OidGenLock, - XidGenLock, - ProcArrayLock, - SInvalReadLock, - SInvalWriteLock, - WALInsertLock, - WALWriteLock, - ControlFileLock, - CheckpointLock, - CLogControlLock, - SubtransControlLock, - MultiXactGenLock, - MultiXactOffsetControlLock, - MultiXactMemberControlLock, - RelCacheInitLock, - BgWriterCommLock, - TwoPhaseStateLock, - TablespaceCreateLock, - BtreeVacuumLock, - AddinShmemInitLock, - AutovacuumLock, - AutovacuumScheduleLock, - SyncScanLock, - RelationMappingLock, - AsyncCtlLock, - AsyncQueueLock, - SerializableXactHashLock, - SerializableFinishedListLock, - SerializablePredicateLockListLock, - OldSerXidLock, - SyncRepLock, - /* Individual lock IDs end here */ - FirstBufMappingLock, - FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS, - FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS, - - /* must be last except for MaxDynamicLWLock: */ - NumFixedLWLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS, - - MaxDynamicLWLock = 1000000000 -} LWLockId; - +#include "storage/flexlock.h" typedef enum LWLockMode { @@ -97,22 +22,9 @@ typedef enum LWLockMode LW_SHARED } LWLockMode; - -#ifdef LOCK_DEBUG -extern bool Trace_lwlocks; -#endif - -extern LWLockId LWLockAssign(void); -extern void LWLockAcquire(LWLockId lockid, LWLockMode mode); -extern bool LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode); -extern void LWLockRelease(LWLockId lockid); -extern void LWLockReleaseAll(void); -extern bool LWLockHeldByMe(LWLockId lockid); - -extern int NumLWLocks(void); -extern Size LWLockShmemSize(void); -extern void CreateLWLocks(void); - -extern void RequestAddinLWLocks(int n); +extern FlexLockId LWLockAssign(void); +extern void LWLockAcquire(FlexLockId lockid, LWLockMode mode); +extern bool LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode); +extern void LWLockRelease(FlexLockId lockid); #endif /* LWLOCK_H */ diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 6e798b1b2d..7e8630dfcc 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -114,10 +114,10 @@ struct PGPROC */ bool recoveryConflictPending; - /* Info about LWLock the process is currently waiting for, if any. */ - bool lwWaiting; /* true if waiting for an LW lock */ - bool lwExclusive; /* true if waiting for exclusive access */ - struct PGPROC *lwWaitLink; /* next waiter for same LW lock */ + /* Info about FlexLock the process is currently waiting for, if any. */ + int flWaitResult; /* result of wait, or 0 if still waiting */ + int flWaitMode; /* lock mode sought */ + struct PGPROC *flWaitLink; /* next waiter for same FlexLock */ /* Info about lock the process is currently waiting for, if any. */ /* waitLock and waitProcLock are NULL if not currently waiting. */ @@ -147,7 +147,7 @@ struct PGPROC struct XidCache subxids; /* cache for subtransaction XIDs */ /* Per-backend LWLock. Protects fields below. 
*/ - LWLockId backendLock; /* protects the fields below */ + FlexLockId backendLock; /* protects the fields below */ /* Lock manager data, recording fast-path locks taken by this backend. */ uint64 fpLockBits; /* lock modes held for each fast-path slot */
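The PGPROC hunks above replace the lwWaiting/lwExclusive flags with a single flWaitResult word: the releasing backend stores any non-zero value and unlocks the waiter's semaphore (see the LWLockRelease hunk). A minimal sketch of the waiter side of that handshake, assuming the semaphore loop lwlock.c has traditionally used; the FlexLockWait name and surrounding queueing code are assumptions, since flexlock.c itself is not shown in this diff:

	/*
	 * Waiter side (sketch): the process has already linked itself into
	 * lock->flex.head while holding lock->flex.mutex, with
	 * flWaitResult = 0 meaning "still waiting".
	 */
	int	extraWaits = 0;

	proc->flWaitResult = 0;			/* 0 = still waiting */
	proc->flWaitMode = mode;		/* e.g. LW_EXCLUSIVE */
	for (;;)
	{
		PGSemaphoreLock(&proc->sem, false);
		if (proc->flWaitResult != 0)
			break;				/* releaser stored non-zero and woke us */
		extraWaits++;			/* absorb wakeups meant for other events */
	}
	/* fix the semaphore count for any wakeups we absorbed */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(&proc->sem);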
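The ProcSleep hunk swaps the generic LWLock calls on ProcArrayLock for a dedicated API, making ProcArrayLock the first specialized FlexLock type. The call pattern, using only names visible in this diff (the contents of procarraylock.h are otherwise assumed):

	#include "storage/procarraylock.h"

	/* old generic form, removed by this patch */
	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
	/* ... inspect or update shared ProcArray state ... */
	LWLockRelease(ProcArrayLock);

	/* new specialized form */
	ProcArrayLockAcquire(PAL_EXCLUSIVE);
	/* ... same critical section; still released before kill() ... */
	ProcArrayLockRelease();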
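predicate.c's assertions now call FlexLockHeldByMe() in place of the LWLockHeldByMe() removed from lwlock.c above. Its implementation is not part of this diff; presumably it mirrors the deleted code, scanning a per-backend array of held lock IDs, roughly as below (array size and variable names are assumptions):

	/* Sketch only: debug support, does not distinguish shared/exclusive. */
	static int		num_held_flexlocks = 0;
	static FlexLockId	held_flexlocks[100];

	bool
	FlexLockHeldByMe(FlexLockId lockid)
	{
		int	i;

		for (i = 0; i < num_held_flexlocks; i++)
		{
			if (held_flexlocks[i] == lockid)
				return true;
		}
		return false;
	}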
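Finally, pg_config_manual.h renames the spare-lock pool to NUM_USER_DEFINED_FLEXLOCKS, while the lwlock.h hunk keeps LWLockAssign/LWLockAcquire/LWLockRelease, now keyed by FlexLockId. A hypothetical add-on module (my_lock and the shmem-startup plumbing are illustrative, not part of this patch) therefore keeps working with only the ID type changed:

	static FlexLockId my_lock;

	static void
	my_shmem_startup(void)
	{
		my_lock = LWLockAssign();	/* draws on the user-defined pool */
	}

	static void
	my_update_shared_state(void)
	{
		LWLockAcquire(my_lock, LW_EXCLUSIVE);
		/* ... modify the add-on's shared-memory area ... */
		LWLockRelease(my_lock);
	}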