diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 1b7053cb1c..048979c716 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -254,6 +254,22 @@ static HTAB *LockMethodLockHash;
 static HTAB *LockMethodProcLockHash;
 static HTAB *LockMethodLocalHash;
 
+/* Initial size of the LockMethodLocalHash table */
+#define LOCKMETHODLOCALHASH_INIT_SIZE 16
+
+/*
+ * If the size of the LockMethodLocalHash table grows beyond this then try
+ * to shrink the table back down to LOCKMETHODLOCALHASH_INIT_SIZE.  This must
+ * not be less than LOCKMETHODLOCALHASH_INIT_SIZE.
+ */
+#define LOCKMETHODLOCALHASH_SHRINK_THRESHOLD 64
+
+/*
+ * How many times must TryShrinkLocalLockHash() be called while
+ * LockMethodLocalHash has exceeded LOCKMETHODLOCALHASH_SHRINK_THRESHOLD
+ * before we rebuild the hash table.
+ */
+#define LOCKMETHODLOCALHASH_SHRINK_FREQUENCY 1000
 
 /* private state for error cleanup */
 static LOCALLOCK *StrongLockInProgress;
@@ -339,6 +355,8 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
 
 #endif							/* not LOCK_DEBUG */
 
+static void InitLocalLockHash(void);
+static inline void TryShrinkLocalLockHash(void);
 static uint32 proclock_hash(const void *key, Size keysize);
 static void RemoveLocalLock(LOCALLOCK *locallock);
 static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
@@ -431,14 +449,28 @@ InitLocks(void)
 	if (!found)
 		SpinLockInit(&FastPathStrongRelationLocks->mutex);
 
+	InitLocalLockHash();
+}
+
+/*
+ * InitLocalLockHash
+ *		Initialize the LockMethodLocalHash hash table.
+ */
+static void
+InitLocalLockHash(void)
+{
+	HASHCTL		info;
+
 	/*
 	 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
 	 * counts and resource owner information.
 	 *
-	 * The non-shared table could already exist in this process (this occurs
-	 * when the postmaster is recreating shared memory after a backend crash).
-	 * If so, delete and recreate it.  (We could simply leave it, since it
-	 * ought to be empty in the postmaster, but for safety let's zap it.)
+	 * First destroy any old table that may exist.  We might just be
+	 * recreating the table or it could already exist in this process (this
+	 * occurs when the postmaster is recreating shared memory after a backend
+	 * crash).  Either way, delete and recreate it.  (We could simply leave
+	 * it, since it ought to be empty in the postmaster, but for safety let's
+	 * zap it.)
 	 */
 	if (LockMethodLocalHash)
 		hash_destroy(LockMethodLocalHash);
@@ -447,11 +479,47 @@ InitLocks(void)
 	info.entrysize = sizeof(LOCALLOCK);
 
 	LockMethodLocalHash = hash_create("LOCALLOCK hash",
-									  16,
+									  LOCKMETHODLOCALHASH_INIT_SIZE,
 									  &info,
 									  HASH_ELEM | HASH_BLOBS);
 }
+/*
+ * TryShrinkLocalLockHash
+ *		Consider rebuilding LockMethodLocalHash.
+ *
+ * NB: We only rebuild the table if: 1) the table's max bucket has gone
+ * beyond the defined threshold, and 2) the number of times the function
+ * has been called while meeting case #1 has exceeded the defined frequency.
+ * Without #2 we may rebuild the table too often, and since rebuilding the
+ * hash table is not free, we may slow down workloads that frequently obtain
+ * large numbers of locks at once.
+ */
+static inline void
+TryShrinkLocalLockHash(void)
+{
+	static int	ntimes_exceeded = 0;
+
+	/*
+	 * 1. Consider shrinking the table whenever the table is empty and the
+	 * maximum used bucket is beyond LOCKMETHODLOCALHASH_SHRINK_THRESHOLD.
+	 */
+	if (hash_get_num_entries(LockMethodLocalHash) == 0 &&
+		hash_get_max_bucket(LockMethodLocalHash) >
+		LOCKMETHODLOCALHASH_SHRINK_THRESHOLD)
+	{
+		/* Increment the number of times we've exceeded the threshold */
+		ntimes_exceeded++;
+
+		/* 2.  Shrink if we've exceeded the threshold enough times */
+		if (ntimes_exceeded >= LOCKMETHODLOCALHASH_SHRINK_FREQUENCY)
+		{
+			/* Rebuild the table and zero the counter */
+			InitLocalLockHash();
+			ntimes_exceeded = 0;
+		}
+	}
+}
 
 
 /*
 * Fetch the lock method table associated with a given lock
@@ -2349,6 +2417,13 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 		LWLockRelease(partitionLock);
 	}							/* loop over partitions */
 
+	/*
+	 * The hash_seq_search can become inefficient when the hash table has
+	 * grown significantly larger than the default size due to the backend
+	 * having obtained large numbers of locks at once. Consider shrinking it.
+	 */
+	TryShrinkLocalLockHash();
+
 #ifdef LOCK_DEBUG
 	if (*(lockMethodTable->trace_flag))
 		elog(LOG, "LockReleaseAll done");
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 0dfbec8e3e..d93e5279ee 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -1351,6 +1351,15 @@ hash_get_num_entries(HTAB *hashp)
 	return sum;
 }
 
+/*
+ * hash_get_max_bucket -- get the maximum used bucket in a hashtable
+ */
+uint32
+hash_get_max_bucket(HTAB *hashp)
+{
+	return hashp->hctl->max_bucket;
+}
+
 /*
  * hash_seq_init/_search/_term
  *			Sequentially search through hash table and return
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index fe5ab9c868..941f99398d 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -132,6 +132,7 @@ extern void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr,
 extern bool hash_update_hash_key(HTAB *hashp, void *existingEntry,
 								 const void *newKeyPtr);
 extern long hash_get_num_entries(HTAB *hashp);
+extern uint32 hash_get_max_bucket(HTAB *hashp);
 extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
 extern void *hash_seq_search(HASH_SEQ_STATUS *status);
 extern void hash_seq_term(HASH_SEQ_STATUS *status);