From 2da723e692164fe136fc7d43130b111fd98a756a Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Mon, 19 Jul 2021 13:48:14 -0700
Subject: [PATCH 3/3] WIP: slab performance.

Discussion: https://postgr.es/m/20210717194333.mr5io3zup3kxahfm@alap3.anarazel.de
---
 src/backend/utils/mmgr/slab.c | 434 ++++++++++++++++++++++------------
 1 file changed, 285 insertions(+), 149 deletions(-)

diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c
index e4b8275045..c979553dcd 100644
--- a/src/backend/utils/mmgr/slab.c
+++ b/src/backend/utils/mmgr/slab.c
@@ -23,15 +23,18 @@
  * global (context) level. This is possible as the chunk size (and thus also
  * the number of chunks per block) is fixed.
  *
- * On each block, free chunks are tracked in a simple linked list. Contents
- * of free chunks is replaced with an index of the next free chunk, forming
- * a very simple linked list. Each block also contains a counter of free
- * chunks. Combined with the local block-level freelist, it makes it trivial
- * to eventually free the whole block.
+ * On each block, never-allocated chunks are tracked by a simple offset, and
+ * free chunks are tracked in a simple linked list. The offset approach
+ * avoids needing to iterate over all chunks when allocating a new block,
+ * which would cause page faults and cache pollution. The contents of a free
+ * chunk are replaced with a pointer to the next free chunk, forming a very
+ * simple linked list. Each block also contains a counter of free chunks.
+ * Combined with the local block-level freelist, it makes it trivial to
+ * eventually free the whole block.
 *
 * At the context level, we use 'freelist' to track blocks ordered by number
- * of free chunks, starting with blocks having a single allocated chunk, and
- * with completely full blocks on the tail.
+ * of free chunks, grouped into SLAB_FREELIST_COUNT buckets, with completely
+ * full blocks in the bucket at index 0.
 *
 * This also allows various optimizations - for example when searching for
 * free chunk, the allocator reuses space from the fullest blocks first, in
@@ -44,7 +47,7 @@
 * case this performs as if the pointer was not maintained.
 *
 * We cache the freelist index for the blocks with the fewest free chunks
- * (minFreeChunks), so that we don't have to search the freelist on every
+ * (minFreeChunksIndex), so that we don't have to search the freelist on every
 * SlabAlloc() call, which is quite expensive.
 *
 *-------------------------------------------------------------------------
@@ -56,6 +59,17 @@
 #include "utils/memdebug.h"
 #include "utils/memutils.h"
 
+struct SlabBlock;
+struct SlabChunk;
+
+/*
+ * Number of actual freelists + 1, for full blocks. Full blocks are always at
+ * index 0.
+ */
+#define SLAB_FREELIST_COUNT 9
+
+#define SLAB_RETAIN_EMPTY_BLOCK_COUNT 10
+
 /*
  * SlabContext is a specialized implementation of MemoryContext.
 */
@@ -68,13 +82,16 @@ typedef struct SlabContext
 	Size		blockSize;		/* block size */
 	Size		headerSize;		/* allocated size of context header */
 	int			chunksPerBlock; /* number of chunks per block */
-	int			minFreeChunks;	/* min number of free chunks in any block */
+	int			minFreeChunksIndex; /* freelist index of blocks with fewest free chunks */
 	int			nblocks;		/* number of blocks allocated */
 #ifdef MEMORY_CONTEXT_CHECKING
 	bool	   *freechunks;		/* bitmap of free chunks in a block */
 #endif
+	int			nfreeblocks;	/* number of retained empty blocks */
+	dlist_head	freeblocks;		/* empty blocks retained for reuse */
+	int			freelist_shift;	/* maps free-chunk counts to freelist indexes */
 	/* blocks with free space, grouped by number of free chunks: */
-	dlist_head	freelist[FLEXIBLE_ARRAY_MEMBER];
+	dlist_head	freelist[SLAB_FREELIST_COUNT];
 } SlabContext;
 
 /*
@@ -83,13 +100,15 @@ typedef struct SlabContext
 *
 * node: doubly-linked list of blocks in global freelist
 * nfree: number of free chunks in this block
- * firstFreeChunk: index of the first free chunk
+ * firstFreeChunk: first free chunk
+ * nunused: number of never-allocated chunks
+ * unused: first never-allocated chunk
 */
typedef struct SlabBlock
{
	dlist_node	node;			/* doubly-linked list */
-	int			nfree;			/* number of free chunks */
-	int			firstFreeChunk; /* index of the first free chunk in the block */
+	int			nfree;			/* number of chunks on freelist + unused */
+	int			nunused;		/* number of unused (never-allocated) chunks */
+	struct SlabChunk *unused;	/* first never-allocated chunk */
+	struct SlabChunk *firstFreeChunk;	/* first free chunk in the block */
 } SlabBlock;
 
 /*
@@ -123,6 +142,35 @@ typedef struct SlabChunk
 #define SlabChunkIndex(slab, block, chunk)	\
 	(((char *) chunk - SlabBlockStart(block)) / slab->fullChunkSize)
 
+static inline uint8
+SlabFreelistIndex(SlabContext *slab, int freecount)
+{
+	uint8		index;
+
+	Assert(freecount <= slab->chunksPerBlock);
+
+	/*
+	 * Ensure, without a branch, that index 0 is only used for blocks
+	 * entirely without free chunks.
+	 *
+	 * XXX: There probably is a cheaper way to do this. Needing to shift
+	 * twice by slab->freelist_shift isn't great.
+	 */
+	index = (freecount + (1 << slab->freelist_shift) - 1) >> slab->freelist_shift;
+
+	if (freecount == 0)
+		Assert(index == 0);
+	else
+		Assert(index > 0 && index < SLAB_FREELIST_COUNT);
+
+	return index;
+}
+
+static inline dlist_head *
+SlabFreelist(SlabContext *slab, int freecount)
+{
+	return &slab->freelist[SlabFreelistIndex(slab, freecount)];
+}
+
 /*
  * These functions implement the MemoryContext API for Slab contexts.
 */
@@ -179,7 +227,6 @@ SlabContextCreate(MemoryContext parent,
 {
 	int			chunksPerBlock;
 	Size		fullChunkSize;
-	Size		freelistSize;
 	Size		headerSize;
 	SlabContext *slab;
 	int			i;
@@ -192,11 +239,11 @@ SlabContextCreate(MemoryContext parent,
 					 "padding calculation in SlabChunk is wrong");
 
 	/* Make sure the linked list node fits inside a freed chunk */
-	if (chunkSize < sizeof(int))
-		chunkSize = sizeof(int);
+	if (chunkSize < MAXALIGN(sizeof(void *)))
+		chunkSize = MAXALIGN(sizeof(void *));
 
 	/* chunk, including SLAB header (both addresses nicely aligned) */
-	fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
+	fullChunkSize = sizeof(SlabChunk) + chunkSize;
 
 	/* Make sure the block can store at least one chunk. */
 	if (blockSize < fullChunkSize + sizeof(SlabBlock))
@@ -206,16 +253,14 @@ SlabContextCreate(MemoryContext parent,
 	/* Compute maximum number of chunks per block */
 	chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
 
-	/* The freelist starts with 0, ends with chunksPerBlock. */
-	freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
-
 	/*
 	 * Allocate the context header. Unlike aset.c, we never try to combine
 	 * this with the first regular block; not worth the extra complication.
+	 * XXX: What's the evidence for that?
 	 */
 
 	/* Size of the memory context header */
-	headerSize = offsetof(SlabContext, freelist) + freelistSize;
+	headerSize = sizeof(SlabContext);
 
 #ifdef MEMORY_CONTEXT_CHECKING
@@ -249,17 +294,34 @@ SlabContextCreate(MemoryContext parent,
 	slab->blockSize = blockSize;
 	slab->headerSize = headerSize;
 	slab->chunksPerBlock = chunksPerBlock;
-	slab->minFreeChunks = 0;
+	slab->minFreeChunksIndex = 0;
 	slab->nblocks = 0;
+	slab->nfreeblocks = 0;
+
+	/*
+	 * Compute a shift such that shifting chunksPerBlock by it yields a value
+	 * smaller than SLAB_FREELIST_COUNT - 1 (one freelist is used for full
+	 * blocks).
+	 */
+	slab->freelist_shift = 0;
+	while ((slab->chunksPerBlock >> slab->freelist_shift) >= (SLAB_FREELIST_COUNT - 1))
+		slab->freelist_shift++;
+
+#if 0
+	elog(LOG, "freelist shift for %d chunks of size %zu is %d, block size %zu",
+		 slab->chunksPerBlock, slab->fullChunkSize, slab->freelist_shift,
+		 slab->blockSize);
+#endif
+
+	dlist_init(&slab->freeblocks);
 
 	/* initialize the freelist slots */
-	for (i = 0; i < (slab->chunksPerBlock + 1); i++)
+	for (i = 0; i < SLAB_FREELIST_COUNT; i++)
 		dlist_init(&slab->freelist[i]);
 
 #ifdef MEMORY_CONTEXT_CHECKING
-	/* set the freechunks pointer right after the freelists array */
+	/* set the freechunks pointer right after the context header */
 	slab->freechunks
-		= (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
+		= (bool *) slab + sizeof(SlabContext);
 #endif
 
 	/* Finally, do the type-independent part of context creation */
@@ -292,8 +354,27 @@ SlabReset(MemoryContext context)
 	SlabCheck(context);
 #endif
 
+	/* release retained empty blocks */
+	{
+		dlist_mutable_iter miter;
+
+		dlist_foreach_modify(miter, &slab->freeblocks)
+		{
+			SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);
+
+			dlist_delete(miter.cur);
+
+#ifdef CLOBBER_FREED_MEMORY
+			wipe_mem(block, slab->blockSize);
+#endif
+			free(block);
+			slab->nblocks--;
+			context->mem_allocated -= slab->blockSize;
+		}
+	}
+
 	/* walk over freelists and free the blocks */
-	for (i = 0; i <= slab->chunksPerBlock; i++)
+	for (i = 0; i < SLAB_FREELIST_COUNT; i++)
 	{
 		dlist_mutable_iter miter;
 
@@ -312,7 +393,7 @@ SlabReset(MemoryContext context)
 		}
 	}
 
-	slab->minFreeChunks = 0;
+	slab->minFreeChunksIndex = 0;
 
 	Assert(slab->nblocks == 0);
 	Assert(context->mem_allocated == 0);
@@ -342,7 +423,6 @@ SlabAlloc(MemoryContext context, Size size, int flags)
 	SlabContext *slab = castNode(SlabContext, context);
 	SlabBlock  *block;
 	SlabChunk  *chunk;
-	int			idx;
 
 	Assert(slab);
@@ -352,8 +432,8 @@ SlabAlloc(MemoryContext context, Size size, int flags)
 	 * sense...
 	 */
 
-	Assert((slab->minFreeChunks >= 0) &&
-		   (slab->minFreeChunks < slab->chunksPerBlock));
+	Assert((slab->minFreeChunksIndex >= 0) &&
+		   (slab->minFreeChunksIndex < SLAB_FREELIST_COUNT));
 
 	/* make sure we only allow correct request size */
 	if (size != slab->chunkSize)
@@ -367,58 +447,88 @@ SlabAlloc(MemoryContext context, Size size, int flags)
-	 * slab->minFreeChunks == 0 means there are no blocks with free chunks,
-	 * thanks to how minFreeChunks is updated at the end of SlabAlloc().
+	 * slab->minFreeChunksIndex == 0 means there are no blocks with free
+	 * chunks, thanks to how minFreeChunksIndex is updated at the end of
+	 * SlabAlloc().
 	 */
-	if (slab->minFreeChunks == 0)
+	if (unlikely(slab->minFreeChunksIndex == 0))
 	{
-		block = (SlabBlock *) malloc(slab->blockSize);
+		if (slab->nfreeblocks > 0)
+		{
+			dlist_node *node;
 
-		if (block == NULL)
-			return NULL;
+			node = dlist_pop_head_node(&slab->freeblocks);
+			block = dlist_container(SlabBlock, node, node);
+			slab->nfreeblocks--;
+		}
+		else
+		{
+			block = (SlabBlock *) malloc(slab->blockSize);
 
-		block->nfree = slab->chunksPerBlock;
-		block->firstFreeChunk = 0;
+			if (unlikely(block == NULL))
+				return NULL;
 
-		/*
-		 * Put all the chunks on a freelist. Walk the chunks and point each
-		 * one to the next one.
-		 */
-		for (idx = 0; idx < slab->chunksPerBlock; idx++)
-		{
-			chunk = SlabBlockGetChunk(slab, block, idx);
-			*(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
+			slab->nblocks += 1;
+			context->mem_allocated += slab->blockSize;
 		}
 
+		block->nfree = slab->chunksPerBlock;
+		block->firstFreeChunk = NULL;
+		block->nunused = slab->chunksPerBlock;
+		block->unused = (SlabChunk *) SlabBlockStart(block);
+
+		slab->minFreeChunksIndex = SlabFreelistIndex(slab, slab->chunksPerBlock);
+
 		/*
 		 * And add it to the last freelist with all chunks empty.
 		 *
 		 * We know there are no blocks in the freelist, otherwise we wouldn't
 		 * need a new block.
 		 */
-		Assert(dlist_is_empty(&slab->freelist[slab->chunksPerBlock]));
+		Assert(dlist_is_empty(SlabFreelist(slab, slab->chunksPerBlock)));
 
-		dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
+		dlist_push_head(SlabFreelist(slab, slab->chunksPerBlock),
+						&block->node);
 
-		slab->minFreeChunks = slab->chunksPerBlock;
-		slab->nblocks += 1;
-		context->mem_allocated += slab->blockSize;
+		chunk = block->unused;
+		block->unused = (SlabChunk *) (((char *) block->unused) + slab->fullChunkSize);
+		block->nunused--;
 	}
+	else
+	{
+		/* grab the block from the freelist */
+		block = dlist_head_element(SlabBlock, node,
+								   &slab->freelist[slab->minFreeChunksIndex]);
 
-	/* grab the block from the freelist (even the new block is there) */
-	block = dlist_head_element(SlabBlock, node,
-							   &slab->freelist[slab->minFreeChunks]);
+		/* make sure we actually got a valid block, with matching nfree */
+		Assert(block != NULL);
+		Assert(slab->minFreeChunksIndex == SlabFreelistIndex(slab, block->nfree));
+		Assert(block->nfree > 0);
 
-	/* make sure we actually got a valid block, with matching nfree */
-	Assert(block != NULL);
-	Assert(slab->minFreeChunks == block->nfree);
-	Assert(block->nfree > 0);
+		/* use a never-allocated chunk if there is one, else pop the freelist */
+		if (block->nunused > 0)
+		{
+			chunk = block->unused;
+			block->unused = (SlabChunk *) (((char *) block->unused) + slab->fullChunkSize);
+			block->nunused--;
+		}
+		else
+		{
+			chunk = block->firstFreeChunk;
 
-	/* we know index of the first free chunk in the block */
-	idx = block->firstFreeChunk;
+			/*
+			 * Remove the chunk from the freelist head. The pointer to the
+			 * next free chunk is stored in the chunk itself.
+			 */
+			VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(void *));
+			block->firstFreeChunk = *(SlabChunk **) SlabChunkGetPointer(chunk);
+		}
+	}
 
-	/* make sure the chunk index is valid, and that it's marked as empty */
-	Assert((idx >= 0) && (idx < slab->chunksPerBlock));
+	/* make sure the chunk lies within the block */
+	Assert((char *) chunk >= SlabBlockStart(block));
+	Assert((char *) chunk < (((char *) block) + slab->blockSize));
 
-	/* compute the chunk location block start (after the block header) */
-	chunk = SlabBlockGetChunk(slab, block, idx);
+	Assert(block->firstFreeChunk == NULL ||
+		   ((block->firstFreeChunk >= (SlabChunk *) SlabBlockStart(block)) &&
+			block->firstFreeChunk <= (SlabChunk *) (((char *) block) + slab->blockSize)));
 
 	/*
 	 * Update the block nfree count, and also the minFreeChunks as we've
@@ -426,54 +536,51 @@ SlabAlloc(MemoryContext context, Size size, int flags)
 	 * (because that's how we chose the block).
 	 */
 	block->nfree--;
-	slab->minFreeChunks = block->nfree;
 
-	/*
-	 * Remove the chunk from the freelist head. The index of the next free
-	 * chunk is stored in the chunk itself.
-	 */
-	VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
-	block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
+	/* Prepare to initialize the chunk header. */
+	VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
 
-	Assert(block->firstFreeChunk >= 0);
-	Assert(block->firstFreeChunk <= slab->chunksPerBlock);
+	chunk->block = block;
+	chunk->slab = slab;
 
-	Assert((block->nfree != 0 &&
-			block->firstFreeChunk < slab->chunksPerBlock) ||
-		   (block->nfree == 0 &&
-			block->firstFreeChunk == slab->chunksPerBlock));
+	Assert(slab->minFreeChunksIndex == SlabFreelistIndex(slab, block->nfree + 1));
 
 	/* move the whole block to the right place in the freelist */
-	dlist_delete(&block->node);
-	dlist_push_head(&slab->freelist[block->nfree], &block->node);
-
-	/*
-	 * And finally update minFreeChunks, i.e. the index to the block with the
-	 * lowest number of free chunks. We only need to do that when the block
-	 * got full (otherwise we know the current block is the right one). We'll
-	 * simply walk the freelist until we find a non-empty entry.
-	 */
-	if (slab->minFreeChunks == 0)
+	if (unlikely(SlabFreelistIndex(slab, block->nfree) != slab->minFreeChunksIndex))
 	{
-		for (idx = 1; idx <= slab->chunksPerBlock; idx++)
+		slab->minFreeChunksIndex = SlabFreelistIndex(slab, block->nfree);
+		dlist_delete(&block->node);
+		dlist_push_head(SlabFreelist(slab, block->nfree), &block->node);
+
+		/*
+		 * And finally update minFreeChunksIndex, i.e. the freelist index of
+		 * the blocks with the lowest number of free chunks. We only need to
+		 * do that when the block got full (otherwise we know the current
		 * block is the right one). We'll simply walk the freelist until we
+		 * find a non-empty entry.
+		 */
+		if (slab->minFreeChunksIndex == 0)
 		{
-			if (dlist_is_empty(&slab->freelist[idx]))
-				continue;
+			for (int idx = 1; idx < SLAB_FREELIST_COUNT; idx++)
+			{
+				if (dlist_is_empty(&slab->freelist[idx]))
+					continue;
 
-			/* found a non-empty freelist */
-			slab->minFreeChunks = idx;
-			break;
+				/* found a non-empty freelist */
+				slab->minFreeChunksIndex = idx;
+				break;
+			}
 		}
 	}
 
+#if 0
+	/*
+	 * FIXME: I don't understand what this ever did? It should be unreachable
	 * I think?
+	 */
 	if (slab->minFreeChunks == slab->chunksPerBlock)
 		slab->minFreeChunks = 0;
-
-	/* Prepare to initialize the chunk header. */
-	VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
-
-	chunk->block = block;
-	chunk->slab = slab;
+#endif
 
 #ifdef MEMORY_CONTEXT_CHECKING
 	/* slab mark to catch clobber of "unused" space */
@@ -496,6 +603,61 @@ SlabAlloc(MemoryContext context, Size size, int flags)
 	return SlabChunkGetPointer(chunk);
 }
 
+static void pg_noinline
+SlabFreeSlow(SlabContext *slab, SlabBlock *block)
+{
+	dlist_delete(&block->node);
+
+	/*
+	 * See if we need to update the minFreeChunksIndex field for the slab -
+	 * we only need to do that if the block had that number of free chunks
+	 * before we freed one. In that case, we check if there still are blocks
+	 * in the original freelist and we either keep the current value (if
+	 * there still are blocks) or increment it by one (the new block is
+	 * still the one with minimum free chunks).
+	 *
+	 * The one exception is when the block will get completely free - in
+	 * that case we will free it, so we can't use it for minFreeChunksIndex.
+	 * It however means there are no more blocks with free chunks.
+	 */
+	if (slab->minFreeChunksIndex == SlabFreelistIndex(slab, block->nfree - 1))
+	{
+		/* Have we removed the last chunk from the freelist? */
+		if (dlist_is_empty(&slab->freelist[slab->minFreeChunksIndex]))
+		{
+			/* but if we made the block entirely free, we'll free it */
+			if (block->nfree == slab->chunksPerBlock)
+				slab->minFreeChunksIndex = 0;
+			else
+				slab->minFreeChunksIndex++;
+		}
+	}
+
+	/* If the block is now completely empty, free or retain it. */
+	if (block->nfree == slab->chunksPerBlock)
+	{
+		/*
+		 * To avoid constantly freeing/allocating blocks in bursty patterns
+		 * (most crucially the case of repeatedly allocating and freeing a
+		 * single chunk), retain a small number of empty blocks.
+		 */
+		if (slab->nfreeblocks < SLAB_RETAIN_EMPTY_BLOCK_COUNT)
+		{
+			dlist_push_head(&slab->freeblocks, &block->node);
+			slab->nfreeblocks++;
+		}
+		else
+		{
+			slab->nblocks--;
+			slab->header.mem_allocated -= slab->blockSize;
+			free(block);
+		}
+	}
+	else
+		dlist_push_head(SlabFreelist(slab, block->nfree), &block->node);
+}
+
 /*
  * SlabFree
  *		Frees allocated memory; memory is removed from the slab.
@@ -503,7 +665,6 @@ SlabAlloc(MemoryContext context, Size size, int flags)
 static void
 SlabFree(MemoryContext context, void *pointer)
 {
-	int			idx;
 	SlabContext *slab = castNode(SlabContext, context);
 	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
 	SlabBlock  *block = chunk->block;
@@ -516,61 +677,26 @@ SlabFree(MemoryContext context, void *pointer)
 				 slab->header.name, chunk);
 #endif
 
-	/* compute index of the chunk with respect to block start */
-	idx = SlabChunkIndex(slab, block, chunk);
-
 	/* add chunk to freelist, and update block nfree count */
-	*(int32 *) pointer = block->firstFreeChunk;
-	block->firstFreeChunk = idx;
+	*(SlabChunk **) pointer = block->firstFreeChunk;
+	block->firstFreeChunk = chunk;
 	block->nfree++;
 
 	Assert(block->nfree > 0);
 	Assert(block->nfree <= slab->chunksPerBlock);
 
 #ifdef CLOBBER_FREED_MEMORY
-	/* XXX don't wipe the int32 index, used for block-level freelist */
-	wipe_mem((char *) pointer + sizeof(int32),
-			 slab->chunkSize - sizeof(int32));
+	/* XXX don't wipe the next-chunk pointer, used for block-level freelist */
+	wipe_mem((char *) pointer + sizeof(SlabChunk *),
+			 slab->chunkSize - sizeof(SlabChunk *));
 #endif
 
 	/* remove the block from a freelist */
-	dlist_delete(&block->node);
-
-	/*
-	 * See if we need to update the minFreeChunks field for the slab - we only
-	 * need to do that if there the block had that number of free chunks
-	 * before we freed one. In that case, we check if there still are blocks
-	 * in the original freelist and we either keep the current value (if there
-	 * still are blocks) or increment it by one (the new block is still the
-	 * one with minimum free chunks).
-	 *
-	 * The one exception is when the block will get completely free - in that
-	 * case we will free it, se we can't use it for minFreeChunks. It however
-	 * means there are no more blocks with free chunks.
-	 */
-	if (slab->minFreeChunks == (block->nfree - 1))
+	if (SlabFreelistIndex(slab, block->nfree) != SlabFreelistIndex(slab, block->nfree - 1))
 	{
-		/* Have we removed the last chunk from the freelist? */
-		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
-		{
-			/* but if we made the block entirely free, we'll free it */
-			if (block->nfree == slab->chunksPerBlock)
-				slab->minFreeChunks = 0;
-			else
-				slab->minFreeChunks++;
-		}
+		SlabFreeSlow(slab, block);
 	}
 
-	/* If the block is now completely empty, free it. */
-	if (block->nfree == slab->chunksPerBlock)
-	{
-		free(block);
-		slab->nblocks--;
-		context->mem_allocated -= slab->blockSize;
-	}
-	else
-		dlist_push_head(&slab->freelist[block->nfree], &block->node);
-
 	Assert(slab->nblocks >= 0);
 	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
 }
@@ -657,7 +783,7 @@ SlabStats(MemoryContext context,
 	/* Include context header in totalspace */
 	totalspace = slab->headerSize;
 
-	for (i = 0; i <= slab->chunksPerBlock; i++)
+	for (i = 0; i < SLAB_FREELIST_COUNT; i++)
 	{
 		dlist_iter	iter;
 
@@ -714,7 +840,7 @@ SlabCheck(MemoryContext context)
 	Assert(slab->chunksPerBlock > 0);
 
 	/* walk all the freelists */
-	for (i = 0; i <= slab->chunksPerBlock; i++)
+	for (i = 0; i < SLAB_FREELIST_COUNT; i++)
 	{
 		int			j,
 					nfree;
@@ -723,20 +849,19 @@ SlabCheck(MemoryContext context)
 		/* walk all blocks on this freelist */
 		dlist_foreach(iter, &slab->freelist[i])
 		{
-			int			idx;
 			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
+			SlabChunk  *cur_chunk;
 
 			/*
 			 * Make sure the number of free chunks (in the block header)
 			 * matches position in the freelist.
 			 */
-			if (block->nfree != i)
+			if (SlabFreelistIndex(slab, block->nfree) != i)
 				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
 					 name, block->nfree, block, i);
 
 			/* reset the bitmap of free chunks for this block */
 			memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
-			idx = block->firstFreeChunk;
 
 			/*
 			 * Now walk through the chunks, count the free ones and also
			 * perform some additional checks for the used ones. As the
			 * freelist is stored within the chunks themselves, we have to
			 * walk through the chunks and construct our own bitmap.
 			 */
-
+			cur_chunk = block->firstFreeChunk;
 			nfree = 0;
-			while (idx < slab->chunksPerBlock)
+			while (cur_chunk != NULL)
 			{
-				SlabChunk  *chunk;
+				int			idx = SlabChunkIndex(slab, block, cur_chunk);
 
 				/* count the chunk as free, add it to the bitmap */
 				nfree++;
 				slab->freechunks[idx] = true;
 
-				/* read index of the next free chunk */
-				chunk = SlabBlockGetChunk(slab, block, idx);
-				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
-				idx = *(int32 *) SlabChunkGetPointer(chunk);
+				/* read the pointer to the next free chunk */
+				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(cur_chunk), sizeof(SlabChunk *));
+				cur_chunk = *(SlabChunk **) SlabChunkGetPointer(cur_chunk);
+			}
+
+			/* never-allocated chunks are free as well */
+			cur_chunk = block->unused;
+			for (j = 0; j < block->nunused; j++)
+			{
+				int			idx = SlabChunkIndex(slab, block, cur_chunk);
+
+				/* count the chunk as free, add it to the bitmap */
+				nfree++;
+				slab->freechunks[idx] = true;
+
+				cur_chunk = (SlabChunk *) (((char *) cur_chunk) + slab->fullChunkSize);
 			}
 
 			for (j = 0; j < slab->chunksPerBlock; j++)
-- 
2.31.1
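
A quick standalone illustration of the bucketing arithmetic above (not part
of the patch): the program below mirrors the freelist_shift search from
SlabContextCreate() and the branch-free ceiling division in
SlabFreelistIndex(). SLAB_FREELIST_COUNT = 9 is taken from the patch;
chunksPerBlock = 120 is a hypothetical value, roughly what an 8kB block with
64-byte chunks would yield.

#include <assert.h>
#include <stdio.h>

#define SLAB_FREELIST_COUNT 9	/* from the patch: 8 freelists + full blocks */

int
main(void)
{
	int			chunksPerBlock = 120;	/* hypothetical example value */
	int			freelist_shift = 0;

	/* same search as SlabContextCreate() performs */
	while ((chunksPerBlock >> freelist_shift) >= (SLAB_FREELIST_COUNT - 1))
		freelist_shift++;

	for (int freecount = 0; freecount <= chunksPerBlock; freecount++)
	{
		/* branch-free ceil(freecount / 2^shift), as in SlabFreelistIndex() */
		int			index = (freecount + (1 << freelist_shift) - 1) >> freelist_shift;

		/* index 0 iff the block is full; everything fits in the array */
		assert((index == 0) == (freecount == 0));
		assert(index < SLAB_FREELIST_COUNT);
	}

	printf("freelist_shift = %d\n", freelist_shift);
	return 0;
}

With these numbers the search settles on freelist_shift = 4, so a block with
nfree free chunks lands in bucket ceil(nfree / 16): bucket 0 holds only
completely full blocks, and even an entirely empty block (nfree = 120) maps
to index 8, still inside the nine-entry array.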
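
The allocation-path change itself - handing out never-allocated chunks by
bumping a pointer, and recycling freed chunks through a pointer stored in
the freed chunk's own payload - can be sketched without the MemoryContext
machinery. Everything below (Block, block_alloc, CHUNK_SIZE, ...) is a
simplified stand-in for the patch's SlabBlock/SlabChunk code, not the patch
itself:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE			64	/* must be >= sizeof(void *) */
#define CHUNKS_PER_BLOCK	16

typedef struct Block
{
	char	   *unused;			/* first never-allocated chunk */
	int			nunused;		/* number of never-allocated chunks left */
	void	   *firstFreeChunk; /* head of the intrusive freelist */
	int			nfree;			/* nunused + chunks on the freelist */
	char		chunks[CHUNKS_PER_BLOCK][CHUNK_SIZE];
} Block;

static void
block_init(Block *block)
{
	/* no loop over the chunks, so no page faults on the cold memory */
	block->unused = block->chunks[0];
	block->nunused = CHUNKS_PER_BLOCK;
	block->firstFreeChunk = NULL;
	block->nfree = CHUNKS_PER_BLOCK;
}

static void *
block_alloc(Block *block)
{
	void	   *chunk;

	if (block->nunused > 0)
	{
		/* bump allocation from the never-used part of the block */
		chunk = block->unused;
		block->unused += CHUNK_SIZE;
		block->nunused--;
	}
	else
	{
		/* pop the freelist; the next pointer lives in the chunk itself */
		chunk = block->firstFreeChunk;
		assert(chunk != NULL);
		memcpy(&block->firstFreeChunk, chunk, sizeof(void *));
	}
	block->nfree--;
	return chunk;
}

static void
block_free(Block *block, void *chunk)
{
	/* push: write the old list head into the freed chunk's payload */
	memcpy(chunk, &block->firstFreeChunk, sizeof(void *));
	block->firstFreeChunk = chunk;
	block->nfree++;
}

int
main(void)
{
	Block	   *block = malloc(sizeof(Block));
	void	   *chunks[CHUNKS_PER_BLOCK];

	assert(block != NULL);
	block_init(block);

	/* a fresh block hands out chunks via the bump pointer... */
	for (int i = 0; i < CHUNKS_PER_BLOCK; i++)
		chunks[i] = block_alloc(block);
	assert(block->nunused == 0 && block->nfree == 0);

	/* ...and recycles freed chunks through the intrusive list */
	block_free(block, chunks[3]);
	assert(block_alloc(block) == chunks[3]);

	free(block);
	return 0;
}

The reason for the two tiers is given in the patch's header comment:
block_init() touches none of the chunk memory, whereas the old code wrote a
next-chunk index into every chunk of a freshly malloc'd block, faulting in
all of its pages up front.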