diff --git a/doc/src/sgml/ref/reindex.sgml b/doc/src/sgml/ref/reindex.sgml
index 7222665..051ebd7 100644
--- a/doc/src/sgml/ref/reindex.sgml
+++ b/doc/src/sgml/ref/reindex.sgml
@@ -21,7 +21,7 @@ PostgreSQL documentation
-REINDEX { INDEX | TABLE | DATABASE | SYSTEM } name [ FORCE ]
+REINDEX { INDEX | TABLE | DATABASE | SYSTEM } [ CONCURRENTLY ] name [ FORCE ]
@@ -68,9 +68,12 @@ REINDEX { INDEX | TABLE | DATABASE | SYSTEM } nam
An index build with the CONCURRENTLY> option failed, leaving
an invalid> index. Such indexes are useless but it can be
convenient to use REINDEX> to rebuild them. Note that
- REINDEX> will not perform a concurrent build. To build the
- index without interfering with production you should drop the index and
- reissue the CREATE INDEX CONCURRENTLY> command.
+ REINDEX> will perform a concurrent build if
+ CONCURRENTLY> is specified. To build the index without interfering
+ with production you should either drop the index and reissue the
+ CREATE INDEX CONCURRENTLY> command, or rebuild it with
+ REINDEX CONCURRENTLY>. Indexes of toast relations can be rebuilt with REINDEX
+ CONCURRENTLY>.
@@ -139,6 +142,21 @@ REINDEX { INDEX | TABLE | DATABASE | SYSTEM } nam
+ CONCURRENTLY
+
+
+ When this option is used, PostgreSQL> will rebuild the
+ index without taking any locks that prevent concurrent inserts,
+ updates, or deletes on the table; whereas a standard reindex build
+ locks out writes (but not reads) on the table until it's done.
+ There are several caveats to be aware of when using this option
+ — see .
+
+
+
+
+
FORCE
@@ -231,6 +249,112 @@ REINDEX { INDEX | TABLE | DATABASE | SYSTEM } nam
to be reindexed by separate commands. This is still possible, but
redundant.
+
+
+
+ Rebuilding Indexes Concurrently
+
+
+ index
+ rebuilding concurrently
+
+
+
+ Rebuilding an index can interfere with regular operation of a database.
+ Normally PostgreSQL> locks the table whose index is rebuilt
+ against writes and performs the entire index build with a single scan of the
+ table. Other transactions can still read the table, but if they try to
+ insert, update, or delete rows in the table they will block until the
+ index rebuild is finished. This could have a severe effect if the system is
+ a live production database. Very large tables can take many hours to be
+ indexed, and even for smaller tables, an index rebuild can lock out writers
+ for periods that are unacceptably long for a production system.
+
+
+
+ PostgreSQL> supports rebuilding indexes without locking
+ out writes. This method is invoked by specifying the
+ CONCURRENTLY> option of REINDEX>.
+ When this option is used, PostgreSQL> must perform two
+ scans of the table for each index that needs to be rebuilt and, in
+ addition, it must wait for all existing transactions that could potentially
+ use the index to terminate. This method requires more total work than a
+ standard index rebuild and takes significantly longer to complete as it
+ needs to wait for unfinished transactions that might modify the index.
+ However, since it allows normal operations to continue while the index
+ is rebuilt, this method is useful for rebuilding indexes in a production
+ environment. Of course, the extra CPU, memory and I/O load imposed by
+ the index rebuild might slow other operations.
+
+
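As a minimal usage sketch (the index and table names are hypothetical), either a single index or all the indexes of a table can be rebuilt this way without blocking writers:

  REINDEX INDEX CONCURRENTLY my_index;
  REINDEX TABLE CONCURRENTLY my_table;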
+
+ In a concurrent index rebuild, a new index whose storage will replace the
+ one to be rebuilt is first entered into the system catalogs in one
+ transaction. Two table scans then occur in two more transactions to build
+ the new index and make it valid for the other backends. Once this is done,
+ the old and new indexes are swapped, and the index that now holds the old
+ data is marked as invalid in another transaction. Finally, two additional
+ transactions are used to mark that index as dead and then drop it.
+
+
+
+ If a problem arises while rebuilding the indexes, such as a
+ uniqueness violation in a unique index, the REINDEX>
+ command will fail but leave behind an invalid> new index on top
+ of the existing one. This index will be ignored for querying purposes
+ because it might be incomplete; however it will still consume update
+ overhead. The psql> \d> command will report
+ such an index as INVALID>:
+
+
+postgres=# \d tab
+ Table "public.tab"
+ Column | Type | Modifiers
+--------+---------+-----------
+ col | integer |
+Indexes:
+ "idx" btree (col)
+ "idx_cct" btree (col) INVALID
+
+
+ The recommended recovery method in such cases is to drop the concurrent
+ index and try REINDEX CONCURRENTLY> again.
+ The concurrent index created during the processing has a name ending in
+ the suffix cct. This also works for indexes of toast relations.
+
+
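For illustration, the recovery described above could look like the following sketch, reusing the idx/idx_cct names from the psql output shown earlier (real index names will of course differ):

  DROP INDEX idx_cct;
  REINDEX INDEX CONCURRENTLY idx;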
+
+ Regular index builds permit other regular index builds on the
+ same table to occur in parallel, but only one concurrent index build
+ can occur on a table at a time. In either case, no other type of schema
+ modification on the table is allowed in the meantime. Another difference
+ is that a regular REINDEX TABLE> or REINDEX INDEX>
+ command can be performed within a transaction block, but
+ REINDEX CONCURRENTLY> cannot. REINDEX DATABASE> is
+ never allowed to run inside a transaction block, and the same holds
+ when CONCURRENTLY> is used with it.
+
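A short sketch of the transaction-block restriction described above (the table name is hypothetical, and the exact error text is not specified here):

  BEGIN;
  REINDEX TABLE my_table;               -- allowed: a regular reindex can run in a transaction block
  REINDEX TABLE CONCURRENTLY my_table;  -- rejected: the concurrent form cannot
  ROLLBACK;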
+
+
+ Invalid indexes of toast relations can be dropped if a failure occurred
+ during REINDEX CONCURRENTLY>. Live indexes of toast relations
+ cannot be dropped.
+
+
+
+ REINDEX DATABASE used with CONCURRENTLY
+ rebuilds only the non-system relations concurrently. System
+ relations are rebuilt non-concurrently. Toast indexes are
+ rebuilt concurrently if the relation they belong to is a non-system
+ relation.
+
+
+
+ REINDEX SYSTEM does not support CONCURRENTLY.
+
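To illustrate the two preceding paragraphs (the database name is hypothetical):

  REINDEX DATABASE CONCURRENTLY my_db;  -- non-system relations concurrently, system catalogs normally
  REINDEX SYSTEM CONCURRENTLY my_db;    -- expected to fail: not supported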
+
@@ -262,7 +386,17 @@ $ psql broken_db
...
broken_db=> REINDEX DATABASE broken_db;
broken_db=> \q
-
+
+
+
+
+ Rebuild the indexes of a table concurrently:
+
+
+REINDEX TABLE CONCURRENTLY my_broken_table;
+
+
+
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 0f3b45f..f72efbb 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -43,9 +43,11 @@
#include "catalog/pg_trigger.h"
#include "catalog/pg_type.h"
#include "catalog/storage.h"
+#include "commands/defrem.h"
#include "commands/tablecmds.h"
#include "commands/trigger.h"
#include "executor/executor.h"
+#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
@@ -672,6 +674,10 @@ UpdateIndexRelation(Oid indexoid,
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
* is_internal: if true, post creation hook for new index
+ * is_reindex: if true, create a duplicate of an existing index as part of a
+ * concurrent operation. Such an index can also belong to a toast relation.
+ * Sufficient locks are assumed to already be held on the related relations
+ * when this is called during a concurrent operation.
*
* Returns the OID of the created index.
*/
@@ -695,7 +701,8 @@ index_create(Relation heapRelation,
bool allow_system_table_mods,
bool skip_build,
bool concurrent,
- bool is_internal)
+ bool is_internal,
+ bool is_reindex)
{
Oid heapRelationId = RelationGetRelid(heapRelation);
Relation pg_class;
@@ -738,19 +745,23 @@ index_create(Relation heapRelation,
/*
* concurrent index build on a system catalog is unsafe because we tend to
- * release locks before committing in catalogs
+ * release locks before committing in catalogs. If the index is created during
+ * a REINDEX CONCURRENTLY operation, sufficient locks are already taken.
*/
if (concurrent &&
- IsSystemRelation(heapRelation))
+ IsSystemRelation(heapRelation) &&
+ !is_reindex)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("concurrent index creation on system catalog tables is not supported")));
/*
* This case is currently not supported, but there's no way to ask for it
- * in the grammar anyway, so it can't happen.
+ * in the grammar anyway, so it can't happen. This might, however, be reached
+ * during a concurrent reindex operation, in which case sufficient locks are
+ * already held on the related relations.
*/
- if (concurrent && is_exclusion)
+ if (concurrent && is_exclusion && !is_reindex)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg_internal("concurrent index creation for exclusion constraints is not supported")));
@@ -1095,6 +1106,427 @@ index_create(Relation heapRelation,
return indexRelationId;
}
+
+/*
+ * index_concurrent_create
+ *
+ * Create an index based on the given one, to be used in a concurrent
+ * operation. The new index is only inserted into the catalogs and needs to
+ * be built later on. This is called during concurrent reindex processing.
+ * The heap relation on which the index is based must be closed by the caller.
+ */
+Oid
+index_concurrent_create(Relation heapRelation, Oid indOid, char *concurrentName)
+{
+ Relation indexRelation;
+ IndexInfo *indexInfo;
+ Oid concurrentOid = InvalidOid;
+ List *columnNames = NIL;
+ List *indexprs = NIL;
+ ListCell *indexpr_item;
+ int i;
+ HeapTuple indexTuple, classTuple;
+ Datum indclassDatum, colOptionDatum, optionDatum;
+ oidvector *indclass;
+ int2vector *indcoloptions;
+ bool isnull;
+ bool isconstraint;
+ bool initdeferred = false;
+ Oid constraintOid = get_index_constraint(indOid);
+
+ indexRelation = index_open(indOid, RowExclusiveLock);
+
+ /* Concurrent index uses the same index information as former index */
+ indexInfo = BuildIndexInfo(indexRelation);
+
+ /*
+ * Determine if index is initdeferred, this depends on its dependent
+ * constraint.
+ */
+ if (OidIsValid(constraintOid))
+ {
+ /* Look for the correct value */
+ HeapTuple constTuple;
+ Form_pg_constraint constraint;
+
+ constTuple = SearchSysCache1(CONSTROID,
+ ObjectIdGetDatum(constraintOid));
+ if (!HeapTupleIsValid(constTuple))
+ elog(ERROR, "cache lookup failed for constraint %u",
+ constraintOid);
+ constraint = (Form_pg_constraint) GETSTRUCT(constTuple);
+ initdeferred = constraint->condeferred;
+
+ ReleaseSysCache(constTuple);
+ }
+
+ /* Get the expressions of this index, needed to build the column names */
+ indexprs = RelationGetIndexExpressions(indexRelation);
+ indexpr_item = list_head(indexprs);
+
+ /* Build the list of column names, necessary for index_create */
+ for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
+ {
+ char *origname, *curname;
+ int i;
+ char buf[NAMEDATALEN];
+
+ AttrNumber attnum = indexInfo->ii_KeyAttrNumbers[i];
+
+ /* Pick up column name depending on attribute type */
+ if (attnum != 0)
+ {
+ /*
+ * This is a column attribute, so simply pick column name from
+ * relation.
+ */
+ Form_pg_attribute attform = heapRelation->rd_att->attrs[attnum - 1];
+ origname = pstrdup(NameStr(attform->attname));
+ }
+ else
+ {
+ Node *indnode;
+ /*
+ * This is the case of an expression, so pick up the expression
+ * name.
+ */
+ Assert(indexpr_item != NULL);
+ indnode = (Node *) lfirst(indexpr_item);
+ indexpr_item = lnext(indexpr_item);
+ origname = deparse_expression(indnode,
+ deparse_context_for(RelationGetRelationName(heapRelation),
+ RelationGetRelid(heapRelation)),
+ false, false);
+ }
+
+ /*
+ * Check if the name picked conflicts with any existing column name, and
+ * adjust it if so.
+ */
+ curname = origname;
+ for (i = 1;; i++)
+ {
+ ListCell *lc2;
+ char nbuf[32];
+ int nlen;
+
+ foreach(lc2, columnNames)
+ {
+ if (strcmp(curname, (char *) lfirst(lc2)) == 0)
+ break;
+ }
+ if (lc2 == NULL)
+ break; /* found nonconflicting name */
+
+ sprintf(nbuf, "%d", i);
+
+ /* Ensure generated names are shorter than NAMEDATALEN */
+ nlen = pg_mbcliplen(origname, strlen(origname),
+ NAMEDATALEN - 1 - strlen(nbuf));
+ memcpy(buf, origname, nlen);
+ strcpy(buf + nlen, nbuf);
+ curname = buf;
+ }
+
+ /* Append name to existing list */
+ columnNames = lappend(columnNames, pstrdup(curname));
+ }
+
+ /*
+ * The index is considered a constraint if it backs a PRIMARY KEY or an EXCLUSION constraint.
+ */
+ isconstraint = indexRelation->rd_index->indisprimary ||
+ indexRelation->rd_index->indisexclusion;
+
+ /* Get the array of class and column options IDs from index info */
+ indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indOid));
+ if (!HeapTupleIsValid(indexTuple))
+ elog(ERROR, "cache lookup failed for index %u", indOid);
+ indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
+ Anum_pg_index_indclass, &isnull);
+ Assert(!isnull);
+ indclass = (oidvector *) DatumGetPointer(indclassDatum);
+
+ colOptionDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
+ Anum_pg_index_indoption, &isnull);
+ Assert(!isnull);
+ indcoloptions = (int2vector *) DatumGetPointer(colOptionDatum);
+
+ /* Fetch options of index if any */
+ classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(indOid));
+ if (!HeapTupleIsValid(classTuple))
+ elog(ERROR, "cache lookup failed for relation %u", indOid);
+ optionDatum = SysCacheGetAttr(RELOID, classTuple,
+ Anum_pg_class_reloptions, &isnull);
+
+ /* Now create the concurrent index */
+ concurrentOid = index_create(heapRelation,
+ (const char*)concurrentName,
+ InvalidOid,
+ InvalidOid,
+ indexInfo,
+ columnNames,
+ indexRelation->rd_rel->relam,
+ indexRelation->rd_rel->reltablespace,
+ indexRelation->rd_indcollation,
+ indclass->values,
+ indcoloptions->values,
+ optionDatum,
+ indexRelation->rd_index->indisprimary,
+ isconstraint, /* is constraint? */
+ !indexRelation->rd_index->indimmediate, /* is deferrable? */
+ initdeferred, /* is initially deferred? */
+ true, /* allow table to be a system catalog? */
+ true, /* skip build? */
+ true, /* concurrent? */
+ false, /* is_internal */
+ true); /* reindex? */
+
+ /* Close the relations used and clean up */
+ index_close(indexRelation, RowExclusiveLock);
+ ReleaseSysCache(indexTuple);
+ ReleaseSysCache(classTuple);
+
+ return concurrentOid;
+}
+
+
+/*
+ * index_concurrent_build
+ *
+ * Build an index for a concurrent operation. Low-level locks are taken when
+ * this operation is performed, so that only schema changes are prevented.
+ */
+void
+index_concurrent_build(Oid heapOid,
+ Oid indexOid,
+ bool isprimary)
+{
+ Relation rel,
+ indexRelation;
+ IndexInfo *indexInfo;
+
+ /* Open and lock the parent heap relation */
+ rel = heap_open(heapOid, ShareUpdateExclusiveLock);
+
+ /* And the target index relation */
+ indexRelation = index_open(indexOid, RowExclusiveLock);
+
+ /* We have to re-build the IndexInfo struct, since it was lost in commit */
+ indexInfo = BuildIndexInfo(indexRelation);
+ Assert(!indexInfo->ii_ReadyForInserts);
+ indexInfo->ii_Concurrent = true;
+ indexInfo->ii_BrokenHotChain = false;
+
+ /* Now build the index */
+ index_build(rel, indexRelation, indexInfo, isprimary, false);
+
+ /* Close both the relations, but keep the locks */
+ heap_close(rel, NoLock);
+ index_close(indexRelation, NoLock);
+}
+
+
+/*
+ * index_concurrent_swap
+ *
+ * Replace the old index by the new one in a concurrent context. For the time
+ * being, all that is done here is to switch the relfilenodes of the two
+ * indexes. If extra operations become necessary during a concurrent swap,
+ * they should be added here. AccessExclusiveLock is taken on the swapped
+ * index relations and held until the end of the transaction in which this
+ * function is called.
+ */
+void
+index_concurrent_swap(Oid newIndexOid, Oid oldIndexOid)
+{
+ Relation oldIndexRel, newIndexRel, pg_class;
+ HeapTuple oldIndexTuple, newIndexTuple;
+ Form_pg_class oldIndexForm, newIndexForm;
+ Oid tmpnode;
+
+ /*
+ * Take an exclusive lock on the old and new index before swapping them.
+ */
+ oldIndexRel = relation_open(oldIndexOid, AccessExclusiveLock);
+ newIndexRel = relation_open(newIndexOid, AccessExclusiveLock);
+
+ /* Now swap relfilenode of those indexes */
+ pg_class = heap_open(RelationRelationId, RowExclusiveLock);
+
+ oldIndexTuple = SearchSysCacheCopy1(RELOID,
+ ObjectIdGetDatum(oldIndexOid));
+ if (!HeapTupleIsValid(oldIndexTuple))
+ elog(ERROR, "could not find tuple for relation %u", oldIndexOid);
+ newIndexTuple = SearchSysCacheCopy1(RELOID,
+ ObjectIdGetDatum(newIndexOid));
+ if (!HeapTupleIsValid(newIndexTuple))
+ elog(ERROR, "could not find tuple for relation %u", newIndexOid);
+ oldIndexForm = (Form_pg_class) GETSTRUCT(oldIndexTuple);
+ newIndexForm = (Form_pg_class) GETSTRUCT(newIndexTuple);
+
+ /* Here is where the actual swapping happens */
+ tmpnode = oldIndexForm->relfilenode;
+ oldIndexForm->relfilenode = newIndexForm->relfilenode;
+ newIndexForm->relfilenode = tmpnode;
+
+ /* Then update the tuples for each relation */
+ simple_heap_update(pg_class, &oldIndexTuple->t_self, oldIndexTuple);
+ simple_heap_update(pg_class, &newIndexTuple->t_self, newIndexTuple);
+ CatalogUpdateIndexes(pg_class, oldIndexTuple);
+ CatalogUpdateIndexes(pg_class, newIndexTuple);
+
+ /* Close relations and clean up */
+ heap_close(pg_class, RowExclusiveLock);
+
+ /* The lock taken previously is not released until the end of transaction */
+ relation_close(oldIndexRel, NoLock);
+ relation_close(newIndexRel, NoLock);
+}
+
+/*
+ * index_concurrent_set_dead
+ *
+ * Perform the last invalidation stage of DROP INDEX CONCURRENTLY before
+ * actually dropping the index. After calling this function the index is
+ * seen by all the backends as dead.
+ */
+void
+index_concurrent_set_dead(Oid indexId, Oid heapId, LOCKTAG *locktag)
+{
+ Relation heapRelation;
+ Relation indexRelation;
+
+ /*
+ * Now we must wait until no running transaction could be using the
+ * index for a query if necessary.
+ *
+ * Note: the reason we use actual lock acquisition here, rather than
+ * just checking the ProcArray and sleeping, is that deadlock is
+ * possible if one of the transactions in question is blocked trying
+ * to acquire an exclusive lock on our table. The lock code will
+ * detect deadlock and error out properly.
+ */
+ if (locktag)
+ WaitForVirtualLocks(*locktag, AccessExclusiveLock);
+
+ /*
+ * No more predicate locks will be acquired on this index, and we're
+ * about to stop doing inserts into the index which could show
+ * conflicts with existing predicate locks, so now is the time to move
+ * them to the heap relation.
+ */
+ heapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
+ indexRelation = index_open(indexId, ShareUpdateExclusiveLock);
+ TransferPredicateLocksToHeapRelation(indexRelation);
+
+ /*
+ * Now we are sure that nobody uses the index for queries; they just
+ * might have it open for updating it. So now we can unset indisready
+ * and indislive, then wait till nobody could be using it at all
+ * anymore.
+ */
+ index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
+
+ /*
+ * Invalidate the relcache for the table, so that after this commit
+ * all sessions will refresh the table's index list. Forgetting just
+ * the index's relcache entry is not enough.
+ */
+ CacheInvalidateRelcache(heapRelation);
+
+ /*
+ * Close the relations again, though still holding session lock.
+ */
+ heap_close(heapRelation, NoLock);
+ index_close(indexRelation, NoLock);
+}
+
+/*
+ * index_concurrent_clear_valid
+ *
+ * Clear the valid state of a given index and invalidate the relcache of its
+ * parent relation. This function should be called when initiating a
+ * concurrent index drop, before the index is set dead.
+ */
+void
+index_concurrent_clear_valid(Relation heapRelation, Oid indexOid)
+{
+ /*
+ * Mark index invalid by updating its pg_index entry
+ */
+ index_set_state_flags(indexOid, INDEX_DROP_CLEAR_VALID);
+
+ /*
+ * Invalidate the relcache for the table, so that after this commit
+ * all sessions will refresh any cached plans that might reference the
+ * index.
+ */
+ CacheInvalidateRelcache(heapRelation);
+}
+
+/*
+ * index_concurrent_drop
+ *
+ * Drop a single index as the last step of a concurrent index operation.
+ * Deletion is done through performDeletion, as otherwise the dependencies of
+ * the index would not be dropped. At this point the indexes are already
+ * considered invalid and dead, so they can be dropped without using any
+ * concurrent options.
+ */
+void
+index_concurrent_drop(Oid indexOid)
+{
+ Oid constraintOid = get_index_constraint(indexOid);
+ ObjectAddress object;
+ Form_pg_index indexForm;
+ Relation pg_index;
+ HeapTuple indexTuple;
+ bool indislive;
+
+ /*
+ * Check that the index to be dropped is not alive; if it were, it might
+ * still be used by other backends.
+ */
+ pg_index = heap_open(IndexRelationId, RowExclusiveLock);
+
+ indexTuple = SearchSysCacheCopy1(INDEXRELID,
+ ObjectIdGetDatum(indexOid));
+ if (!HeapTupleIsValid(indexTuple))
+ elog(ERROR, "cache lookup failed for index %u", indexOid);
+ indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
+ indislive = indexForm->indislive;
+
+ /* Clean up */
+ heap_close(pg_index, RowExclusiveLock);
+
+ /* Leave if index is still alive */
+ if (indislive)
+ return;
+
+ /*
+ * We are sure to have a dead index, so begin the drop process.
+ * Register constraint or index for drop.
+ */
+ if (OidIsValid(constraintOid))
+ {
+ object.classId = ConstraintRelationId;
+ object.objectId = constraintOid;
+ }
+ else
+ {
+ object.classId = RelationRelationId;
+ object.objectId = indexOid;
+ }
+
+ object.objectSubId = 0;
+
+ /* Perform deletion for normal and toast indexes */
+ performDeletion(&object,
+ DROP_RESTRICT,
+ 0);
+}
+
+
/*
* index_constraint_create
*
@@ -1324,7 +1756,6 @@ index_drop(Oid indexId, bool concurrent)
indexrelid;
LOCKTAG heaplocktag;
LOCKMODE lockmode;
- VirtualTransactionId *old_lockholders;
/*
* To drop an index safely, we must grab exclusive lock on its parent
@@ -1406,17 +1837,8 @@ index_drop(Oid indexId, bool concurrent)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DROP INDEX CONCURRENTLY must be first action in transaction")));
- /*
- * Mark index invalid by updating its pg_index entry
- */
- index_set_state_flags(indexId, INDEX_DROP_CLEAR_VALID);
-
- /*
- * Invalidate the relcache for the table, so that after this commit
- * all sessions will refresh any cached plans that might reference the
- * index.
- */
- CacheInvalidateRelcache(userHeapRelation);
+ /* Mark the index as invalid */
+ index_concurrent_clear_valid(userHeapRelation, indexId);
/* save lockrelid and locktag for below, then close but keep locks */
heaprelid = userHeapRelation->rd_lockInfo.lockRelId;
@@ -1444,63 +1866,8 @@ index_drop(Oid indexId, bool concurrent)
CommitTransactionCommand();
StartTransactionCommand();
- /*
- * Now we must wait until no running transaction could be using the
- * index for a query. To do this, inquire which xacts currently would
- * conflict with AccessExclusiveLock on the table -- ie, which ones
- * have a lock of any kind on the table. Then wait for each of these
- * xacts to commit or abort. Note we do not need to worry about xacts
- * that open the table for reading after this point; they will see the
- * index as invalid when they open the relation.
- *
- * Note: the reason we use actual lock acquisition here, rather than
- * just checking the ProcArray and sleeping, is that deadlock is
- * possible if one of the transactions in question is blocked trying
- * to acquire an exclusive lock on our table. The lock code will
- * detect deadlock and error out properly.
- *
- * Note: GetLockConflicts() never reports our own xid, hence we need
- * not check for that. Also, prepared xacts are not reported, which
- * is fine since they certainly aren't going to do anything more.
- */
- old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
-
- while (VirtualTransactionIdIsValid(*old_lockholders))
- {
- VirtualXactLock(*old_lockholders, true);
- old_lockholders++;
- }
-
- /*
- * No more predicate locks will be acquired on this index, and we're
- * about to stop doing inserts into the index which could show
- * conflicts with existing predicate locks, so now is the time to move
- * them to the heap relation.
- */
- userHeapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
- userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
- TransferPredicateLocksToHeapRelation(userIndexRelation);
-
- /*
- * Now we are sure that nobody uses the index for queries; they just
- * might have it open for updating it. So now we can unset indisready
- * and indislive, then wait till nobody could be using it at all
- * anymore.
- */
- index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
-
- /*
- * Invalidate the relcache for the table, so that after this commit
- * all sessions will refresh the table's index list. Forgetting just
- * the index's relcache entry is not enough.
- */
- CacheInvalidateRelcache(userHeapRelation);
-
- /*
- * Close the relations again, though still holding session lock.
- */
- heap_close(userHeapRelation, NoLock);
- index_close(userIndexRelation, NoLock);
+ /* Finish invalidation of index and mark it as dead */
+ index_concurrent_set_dead(indexId, heapId, &heaplocktag);
/*
* Again, commit the transaction to make the pg_index update visible
@@ -1513,13 +1880,7 @@ index_drop(Oid indexId, bool concurrent)
* Wait till every transaction that saw the old index state has
* finished. The logic here is the same as above.
*/
- old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
-
- while (VirtualTransactionIdIsValid(*old_lockholders))
- {
- VirtualXactLock(*old_lockholders, true);
- old_lockholders++;
- }
+ WaitForVirtualLocks(heaplocktag, AccessExclusiveLock);
/*
* Re-open relations to allow us to complete our actions.
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index 385d64d..0c2971b 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -281,7 +281,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptio
rel->rd_rel->reltablespace,
collationObjectId, classObjectId, coloptions, (Datum) 0,
true, false, false, false,
- true, false, false, true);
+ true, false, false, false, false);
heap_close(toast_rel, NoLock);
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index f855bef..a12dcb9 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -68,8 +68,9 @@ static void ComputeIndexAttrs(IndexInfo *indexInfo,
static Oid GetIndexOpClass(List *opclass, Oid attrType,
char *accessMethodName, Oid accessMethodId);
static char *ChooseIndexName(const char *tabname, Oid namespaceId,
- List *colnames, List *exclusionOpNames,
- bool primary, bool isconstraint);
+ List *colnames, List *exclusionOpNames,
+ bool primary, bool isconstraint,
+ bool concurrent);
static char *ChooseIndexNameAddition(List *colnames);
static List *ChooseIndexColumnNames(List *indexElems);
static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
@@ -311,7 +312,6 @@ DefineIndex(IndexStmt *stmt,
Oid tablespaceId;
List *indexColNames;
Relation rel;
- Relation indexRelation;
HeapTuple tuple;
Form_pg_am accessMethodForm;
bool amcanorder;
@@ -320,13 +320,9 @@ DefineIndex(IndexStmt *stmt,
int16 *coloptions;
IndexInfo *indexInfo;
int numberOfAttributes;
- VirtualTransactionId *old_lockholders;
- VirtualTransactionId *old_snapshots;
- int n_old_snapshots;
LockRelId heaprelid;
LOCKTAG heaplocktag;
Snapshot snapshot;
- int i;
/*
* count attributes in index
@@ -453,7 +449,8 @@ DefineIndex(IndexStmt *stmt,
indexColNames,
stmt->excludeOpNames,
stmt->primary,
- stmt->isconstraint);
+ stmt->isconstraint,
+ false);
/*
* look up the access method, verify it can handle the requested features
@@ -600,7 +597,7 @@ DefineIndex(IndexStmt *stmt,
stmt->isconstraint, stmt->deferrable, stmt->initdeferred,
allowSystemTableMods,
skip_build || stmt->concurrent,
- stmt->concurrent, !check_rights);
+ stmt->concurrent, !check_rights, false);
/* Add any requested comment */
if (stmt->idxcomment != NULL)
@@ -663,18 +660,8 @@ DefineIndex(IndexStmt *stmt,
* one of the transactions in question is blocked trying to acquire an
* exclusive lock on our table. The lock code will detect deadlock and
* error out properly.
- *
- * Note: GetLockConflicts() never reports our own xid, hence we need not
- * check for that. Also, prepared xacts are not reported, which is fine
- * since they certainly aren't going to do anything more.
*/
- old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
-
- while (VirtualTransactionIdIsValid(*old_lockholders))
- {
- VirtualXactLock(*old_lockholders, true);
- old_lockholders++;
- }
+ WaitForVirtualLocks(heaplocktag, ShareLock);
/*
* At this moment we are sure that there are no transactions with the
@@ -694,27 +681,13 @@ DefineIndex(IndexStmt *stmt,
* HOT-chain or the extension of the chain is HOT-safe for this index.
*/
- /* Open and lock the parent heap relation */
- rel = heap_openrv(stmt->relation, ShareUpdateExclusiveLock);
-
- /* And the target index relation */
- indexRelation = index_open(indexRelationId, RowExclusiveLock);
-
/* Set ActiveSnapshot since functions in the indexes may need it */
PushActiveSnapshot(GetTransactionSnapshot());
- /* We have to re-build the IndexInfo struct, since it was lost in commit */
- indexInfo = BuildIndexInfo(indexRelation);
- Assert(!indexInfo->ii_ReadyForInserts);
- indexInfo->ii_Concurrent = true;
- indexInfo->ii_BrokenHotChain = false;
-
- /* Now build the index */
- index_build(rel, indexRelation, indexInfo, stmt->primary, false);
-
- /* Close both the relations, but keep the locks */
- heap_close(rel, NoLock);
- index_close(indexRelation, NoLock);
+ /* Perform concurrent build of index */
+ index_concurrent_build(RangeVarGetRelid(stmt->relation, NoLock, false),
+ indexRelationId,
+ stmt->primary);
/*
* Update the pg_index row to mark the index as ready for inserts. Once we
@@ -738,13 +711,7 @@ DefineIndex(IndexStmt *stmt,
* We once again wait until no transaction can have the table open with
* the index marked as read-only for updates.
*/
- old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
-
- while (VirtualTransactionIdIsValid(*old_lockholders))
- {
- VirtualXactLock(*old_lockholders, true);
- old_lockholders++;
- }
+ WaitForVirtualLocks(heaplocktag, ShareLock);
/*
* Now take the "reference snapshot" that will be used by validate_index()
@@ -773,74 +740,9 @@ DefineIndex(IndexStmt *stmt,
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
- * transactions that might have older snapshots. Obtain a list of VXIDs
- * of such transactions, and wait for them individually.
- *
- * We can exclude any running transactions that have xmin > the xmin of
- * our reference snapshot; their oldest snapshot must be newer than ours.
- * We can also exclude any transactions that have xmin = zero, since they
- * evidently have no live snapshot at all (and any one they might be in
- * process of taking is certainly newer than ours). Transactions in other
- * DBs can be ignored too, since they'll never even be able to see this
- * index.
- *
- * We can also exclude autovacuum processes and processes running manual
- * lazy VACUUMs, because they won't be fazed by missing index entries
- * either. (Manual ANALYZEs, however, can't be excluded because they
- * might be within transactions that are going to do arbitrary operations
- * later.)
- *
- * Also, GetCurrentVirtualXIDs never reports our own vxid, so we need not
- * check for that.
- *
- * If a process goes idle-in-transaction with xmin zero, we do not need to
- * wait for it anymore, per the above argument. We do not have the
- * infrastructure right now to stop waiting if that happens, but we can at
- * least avoid the folly of waiting when it is idle at the time we would
- * begin to wait. We do this by repeatedly rechecking the output of
- * GetCurrentVirtualXIDs. If, during any iteration, a particular vxid
- * doesn't show up in the output, we know we can forget about it.
+ * transactions that might have older snapshots.
*/
- old_snapshots = GetCurrentVirtualXIDs(snapshot->xmin, true, false,
- PROC_IS_AUTOVACUUM | PROC_IN_VACUUM,
- &n_old_snapshots);
-
- for (i = 0; i < n_old_snapshots; i++)
- {
- if (!VirtualTransactionIdIsValid(old_snapshots[i]))
- continue; /* found uninteresting in previous cycle */
-
- if (i > 0)
- {
- /* see if anything's changed ... */
- VirtualTransactionId *newer_snapshots;
- int n_newer_snapshots;
- int j;
- int k;
-
- newer_snapshots = GetCurrentVirtualXIDs(snapshot->xmin,
- true, false,
- PROC_IS_AUTOVACUUM | PROC_IN_VACUUM,
- &n_newer_snapshots);
- for (j = i; j < n_old_snapshots; j++)
- {
- if (!VirtualTransactionIdIsValid(old_snapshots[j]))
- continue; /* found uninteresting in previous cycle */
- for (k = 0; k < n_newer_snapshots; k++)
- {
- if (VirtualTransactionIdEquals(old_snapshots[j],
- newer_snapshots[k]))
- break;
- }
- if (k >= n_newer_snapshots) /* not there anymore */
- SetInvalidVirtualTransactionId(old_snapshots[j]);
- }
- pfree(newer_snapshots);
- }
-
- if (VirtualTransactionIdIsValid(old_snapshots[i]))
- VirtualXactLock(old_snapshots[i], true);
- }
+ WaitForOldSnapshots(snapshot);
/*
* Index can now be marked valid -- update its pg_index entry
@@ -853,7 +755,7 @@ DefineIndex(IndexStmt *stmt,
* relcache inval on the parent table to force replanning of cached plans.
* Otherwise existing sessions might fail to use the new index where it
* would be useful. (Note that our earlier commits did not create reasons
- * to replan; so relcache flush on the index itself was sufficient.)
+ * to replan; relcache flush on the index itself was sufficient.)
*/
CacheInvalidateRelcacheByRelid(heaprelid.relId);
@@ -873,6 +775,521 @@ DefineIndex(IndexStmt *stmt,
/*
+ * ReindexRelationConcurrently
+ *
+ * Process REINDEX CONCURRENTLY for the given relation Oid. The relation can
+ * be either an index or a table. If a table is specified, each reindexing
+ * phase is applied to all of the table's indexes at once, as well as to the
+ * indexes of its toast table. Returns true if any index was rebuilt.
+ */
+bool
+ReindexRelationConcurrently(Oid relationOid)
+{
+ List *concurrentIndexIds = NIL,
+ *indexIds = NIL,
+ *parentRelationIds = NIL,
+ *lockTags = NIL,
+ *relationLocks = NIL;
+ ListCell *lc, *lc2;
+ Snapshot snapshot;
+
+ /*
+ * Extract the list of indexes that are going to be rebuilt, based on the
+ * relation Oid given by the caller. If the relkind of the given Oid is a
+ * table, all its valid indexes will be rebuilt, including the indexes of
+ * its associated toast table. If the relkind is an index, this index
+ * itself will be rebuilt. The locks taken on the parent relations and the
+ * involved indexes are kept until this transaction is committed, to
+ * protect against schema changes that might occur before the session
+ * lock is taken on each relation.
+ */
+ switch (get_rel_relkind(relationOid))
+ {
+ case RELKIND_RELATION:
+ {
+ /*
+ * In the case of a relation, find all its indexes
+ * including toast indexes.
+ */
+ Relation heapRelation = heap_open(relationOid,
+ ShareUpdateExclusiveLock);
+
+ /* Track this relation for session locks */
+ parentRelationIds = lappend_oid(parentRelationIds, relationOid);
+
+ /* A shared relation cannot be reindexed concurrently */
+ if (heapRelation->rd_rel->relisshared)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("concurrent reindex is not supported for shared relations")));
+
+ /* Add all the valid indexes of relation to list */
+ foreach(lc2, RelationGetIndexList(heapRelation))
+ {
+ Oid cellOid = lfirst_oid(lc2);
+ Relation indexRelation = index_open(cellOid,
+ ShareUpdateExclusiveLock);
+
+ if (!indexRelation->rd_index->indisvalid)
+ ereport(WARNING,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("cannot reindex concurrently invalid index \"%s.%s\", skipping",
+ get_namespace_name(get_rel_namespace(cellOid)),
+ get_rel_name(cellOid))));
+ else
+ indexIds = lappend_oid(indexIds, cellOid);
+
+ index_close(indexRelation, NoLock);
+ }
+
+ /* Also add the toast indexes */
+ if (OidIsValid(heapRelation->rd_rel->reltoastrelid))
+ {
+ Oid toastOid = heapRelation->rd_rel->reltoastrelid;
+ Relation toastRelation = heap_open(toastOid,
+ ShareUpdateExclusiveLock);
+
+ /* Track this relation for session locks */
+ parentRelationIds = lappend_oid(parentRelationIds, toastOid);
+
+ foreach(lc2, RelationGetIndexList(toastRelation))
+ {
+ Oid cellOid = lfirst_oid(lc2);
+ Relation indexRelation = index_open(cellOid,
+ ShareUpdateExclusiveLock);
+
+ if (!indexRelation->rd_index->indisvalid)
+ ereport(WARNING,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("cannot reindex concurrently invalid index \"%s.%s\", skipping",
+ get_namespace_name(get_rel_namespace(cellOid)),
+ get_rel_name(cellOid))));
+ else
+ indexIds = lappend_oid(indexIds, cellOid);
+
+ index_close(indexRelation, NoLock);
+ }
+
+ heap_close(toastRelation, NoLock);
+ }
+
+ heap_close(heapRelation, NoLock);
+ break;
+ }
+ case RELKIND_INDEX:
+ {
+ /*
+ * For an index, simply add its Oid to the list. Invalid indexes
+ * cannot be included in the list.
+ */
+ Relation indexRelation = index_open(relationOid, ShareUpdateExclusiveLock);
+
+ /* Track the parent relation of this index for session locks */
+ parentRelationIds = list_make1_oid(IndexGetRelation(relationOid, false));
+
+ if (!indexRelation->rd_index->indisvalid)
+ ereport(WARNING,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("cannot reindex concurrently invalid index \"%s.%s\", skipping",
+ get_namespace_name(get_rel_namespace(relationOid)),
+ get_rel_name(relationOid))));
+ else
+ indexIds = list_make1_oid(relationOid);
+
+ index_close(indexRelation, NoLock);
+ break;
+ }
+ default:
+ /* nothing to do */
+ break;
+ }
+
+ /* Definitely no indexes, so leave */
+ if (indexIds == NIL)
+ return false;
+
+ Assert(parentRelationIds != NIL);
+
+ /*
+ * Phase 1 of REINDEX CONCURRENTLY
+ *
+ * Here begins the process of rebuilding the indexes concurrently. For each
+ * index, we first create a new index based on the same definition as the
+ * former one; it is only registered in the catalogs and will be built
+ * afterwards. All these operations are performed at the same time for all
+ * the indexes of the parent relation, including the indexes of its toast
+ * relation.
+ */
+
+ /* Do the concurrent index creation for each index */
+ foreach(lc, indexIds)
+ {
+ char *concurrentName;
+ Oid indOid = lfirst_oid(lc);
+ Oid concurrentOid = InvalidOid;
+ Relation indexRel,
+ indexParentRel,
+ indexConcurrentRel;
+ LockRelId *lockrelid;
+
+ indexRel = index_open(indOid, ShareUpdateExclusiveLock);
+ /* Open the index's parent relation, which might be a plain table or a toast relation */
+ indexParentRel = heap_open(indexRel->rd_index->indrelid,
+ ShareUpdateExclusiveLock);
+
+ /* Choose a relation name for concurrent index */
+ concurrentName = ChooseIndexName(get_rel_name(indOid),
+ get_rel_namespace(indexRel->rd_index->indrelid),
+ NULL,
+ false,
+ false,
+ false,
+ true);
+
+ /* Create concurrent index based on given index */
+ concurrentOid = index_concurrent_create(indexParentRel,
+ indOid,
+ concurrentName);
+
+ /*
+ * Now open the relation of the concurrent index; a lock is needed on
+ * it as well.
+ */
+ indexConcurrentRel = index_open(concurrentOid, ShareUpdateExclusiveLock);
+
+ /* Save the concurrent index Oid */
+ concurrentIndexIds = lappend_oid(concurrentIndexIds, concurrentOid);
+
+ /*
+ * Save the lockrelid of each index relation to protect it from being
+ * dropped, then close the relations. Each entry is palloc'd so that it
+ * remains valid after this loop. The lockrelid of the parent relation is
+ * not added here, to avoid taking multiple locks on the same relation;
+ * instead we rely on parentRelationIds built earlier.
+ */
+ lockrelid = (LockRelId *) palloc(sizeof(LockRelId));
+ *lockrelid = indexRel->rd_lockInfo.lockRelId;
+ relationLocks = lappend(relationLocks, lockrelid);
+ lockrelid = (LockRelId *) palloc(sizeof(LockRelId));
+ *lockrelid = indexConcurrentRel->rd_lockInfo.lockRelId;
+ relationLocks = lappend(relationLocks, lockrelid);
+
+ index_close(indexRel, NoLock);
+ index_close(indexConcurrentRel, NoLock);
+ heap_close(indexParentRel, NoLock);
+ }
+
+ /*
+ * Save the heap lock for following visibility checks with other backends
+ * might conflict with this session.
+ */
+ foreach(lc, parentRelationIds)
+ {
+ Relation heapRelation = heap_open(lfirst_oid(lc), ShareUpdateExclusiveLock);
+ LockRelId *lockrelid = (LockRelId *) palloc(sizeof(LockRelId));
+ LOCKTAG *heaplocktag = (LOCKTAG *) palloc(sizeof(LOCKTAG));
+
+ /* Add the lockrelid of the parent relation to the list of locked relations */
+ *lockrelid = heapRelation->rd_lockInfo.lockRelId;
+ relationLocks = lappend(relationLocks, lockrelid);
+
+ /* Save the LOCKTAG for this parent relation for the wait phase */
+ SET_LOCKTAG_RELATION(*heaplocktag, lockrelid->dbId, lockrelid->relId);
+ lockTags = lappend(lockTags, heaplocktag);
+
+ /* Close heap relation */
+ heap_close(heapRelation, NoLock);
+ }
+
+ /*
+ * For a concurrent build, it is necessary to make the catalog entries
+ * visible to the other transactions before actually building the index.
+ * This will prevent them from making incompatible HOT updates. The index
+ * is marked as not ready and invalid so that no other transaction will try
+ * to use it for INSERT or SELECT.
+ *
+ * Before committing, get a session-level lock on the parent relation, the
+ * old index and its concurrent copy, to ensure that none of them are
+ * dropped until the operation is done.
+ */
+ foreach(lc, relationLocks)
+ {
+ LockRelId lockRel = * (LockRelId *) lfirst(lc);
+ LockRelationIdForSession(&lockRel, ShareUpdateExclusiveLock);
+ }
+
+ PopActiveSnapshot();
+ CommitTransactionCommand();
+
+ /*
+ * Phase 2 of REINDEX CONCURRENTLY
+ *
+ * Build the concurrent indexes, using a separate transaction for each index
+ * to avoid keeping transactions open for an unnecessarily long time. A
+ * concurrent build is done for each concurrent index that will replace an
+ * old index. Before doing that, we need to wait until no running
+ * transaction could have the parent table of an index open.
+ */
+
+ /* Perform a wait on all the session locks */
+ StartTransactionCommand();
+ WaitForMultipleVirtualLocks(lockTags, ShareLock);
+ CommitTransactionCommand();
+
+ /* Get the first element of concurrent index list */
+ lc2 = list_head(concurrentIndexIds);
+
+ foreach(lc, indexIds)
+ {
+ Relation indexRel;
+ Oid indOid = lfirst_oid(lc);
+ Oid concurrentOid = lfirst_oid(lc2);
+ Oid heapId;
+ bool primary;
+
+ /* Move to next concurrent item */
+ lc2 = lnext(lc2);
+
+ /* Start new transaction for this index concurrent build */
+ StartTransactionCommand();
+
+ /* Set ActiveSnapshot since functions in the indexes may need it */
+ PushActiveSnapshot(GetTransactionSnapshot());
+
+ /* Index relation has been closed by previous commit, so reopen it */
+ indexRel = index_open(indOid, ShareUpdateExclusiveLock);
+ heapId = indexRel->rd_index->indrelid;
+ primary = indexRel->rd_index->indisprimary;
+ index_close(indexRel, ShareUpdateExclusiveLock);
+
+ /* Perform concurrent build of new index */
+ index_concurrent_build(heapId,
+ concurrentOid,
+ primary);
+
+ /*
+ * Update the pg_index row of the concurrent index as ready for inserts.
+ * Once we commit this transaction, any new transactions that open the
+ * table must insert new entries into the index for insertions and
+ * non-HOT updates.
+ */
+ index_set_state_flags(concurrentOid, INDEX_CREATE_SET_READY);
+
+ /* we can do away with our snapshot */
+ PopActiveSnapshot();
+
+ /*
+ * Commit this transaction to make the indisready update visible for
+ * concurrent index.
+ */
+ CommitTransactionCommand();
+ }
+
+
+ /*
+ * Phase 3 of REINDEX CONCURRENTLY
+ *
+ * During this phase the concurrent indexes catch up with any insertions
+ * that might have occurred in the parent table, and are marked valid once
+ * done.
+ *
+ * We once again wait until no transaction can have the table open with
+ * the index marked as read-only for updates. Each index validation is done
+ * in a separate transaction to avoid keeping a transaction open for an
+ * unnecessarily long time.
+ */
+
+ /*
+ * Perform a scan of each concurrent index with the heap, then insert
+ * any missing index entries.
+ */
+ foreach(lc, concurrentIndexIds)
+ {
+ Oid indOid = lfirst_oid(lc);
+ Oid relOid;
+
+ /* Open separate transaction to validate index */
+ StartTransactionCommand();
+
+ /* Get the parent relation Oid */
+ relOid = IndexGetRelation(indOid, false);
+
+ /*
+ * Take the reference snapshot that will be used for the concurrent indexes
+ * validation.
+ */
+ snapshot = RegisterSnapshot(GetTransactionSnapshot());
+ PushActiveSnapshot(snapshot);
+
+ /* Validate the index, which might belong to a toast relation */
+ validate_index(relOid, indOid, snapshot);
+
+ /*
+ * The concurrent index is now valid in the sense that it contains all
+ * currently interesting tuples. However, it might not contain tuples
+ * deleted just before the reference snapshot was taken, so we need to
+ * wait for the transactions that might have older snapshots than ours.
+ */
+ WaitForOldSnapshots(snapshot);
+
+ /*
+ * Concurrent index can now be marked as valid -- update pg_index
+ * entries.
+ */
+ index_set_state_flags(indOid, INDEX_CREATE_SET_VALID);
+
+ /*
+ * The pg_index update will cause other backends to update their entries
+ * for the concurrent index, but the relcache of the parent relation needs
+ * to be invalidated as well.
+ */
+ CacheInvalidateRelcacheByRelid(relOid);
+
+ /* we can now do away with our active snapshot */
+ PopActiveSnapshot();
+
+ /* And we can remove the validating snapshot too */
+ UnregisterSnapshot(snapshot);
+
+ /* Commit this transaction to make the concurrent index valid */
+ CommitTransactionCommand();
+ }
+
+ /*
+ * Phase 4 of REINDEX CONCURRENTLY
+ *
+ * Now that the concurrent indexes are valid and can be used, each one needs
+ * to be swapped with its corresponding old index. The index that ends up
+ * holding the old data is marked as invalid, making it unusable by other
+ * backends once the swapping transaction is committed.
+ */
+
+ /* Get the first element of the concurrent index list */
+ lc2 = list_head(concurrentIndexIds);
+
+ /* Swap the indexes and mark the indexes that have the old data as invalid */
+ foreach(lc, indexIds)
+ {
+ Oid indOid = lfirst_oid(lc);
+ Oid concurrentOid = lfirst_oid(lc2);
+ Relation indexRel, indexParentRel;
+
+ /* Move to next concurrent item */
+ lc2 = lnext(lc2);
+
+ /*
+ * Each index needs to be swapped in a separate transaction, so start
+ * a new one.
+ */
+ StartTransactionCommand();
+
+ /*
+ * Open the old index and its parent relation so that the relcache of the
+ * parent can be invalidated. Only ShareUpdateExclusiveLock is taken here,
+ * since session-level locks are already held; the AccessExclusiveLock
+ * needed for the swap itself is taken inside index_concurrent_swap.
+ */
+ indexRel = index_open(indOid, ShareUpdateExclusiveLock);
+ indexParentRel = heap_open(indexRel->rd_index->indrelid,
+ ShareUpdateExclusiveLock);
+
+ /* Mark the concurrent index as invalid; after the swap it will hold the old data */
+ index_concurrent_clear_valid(indexParentRel, concurrentOid);
+
+ /* Swap the old index with its concurrent counterpart */
+ index_concurrent_swap(concurrentOid, indOid);
+
+ /*
+ * Invalidate the relcache for the table, so that after this commit
+ * all sessions will refresh any cached plans that might reference the
+ * index.
+ */
+ CacheInvalidateRelcache(indexParentRel);
+
+ /* Close relations opened previously for cache invalidation */
+ index_close(indexRel, ShareUpdateExclusiveLock);
+ heap_close(indexParentRel, ShareUpdateExclusiveLock);
+
+ /* Commit this transaction and make old index invalidation visible */
+ CommitTransactionCommand();
+ }
+
+ /*
+ * Phase 5 of REINDEX CONCURRENTLY
+ *
+ * The concurrent indexes now hold the old relfilenode of the original
+ * indexes, and must be made unusable by the transactions that might still
+ * use them. Each operation is performed in a separate transaction.
+ */
+
+ /* Now mark the concurrent indexes as not ready */
+ foreach(lc, concurrentIndexIds)
+ {
+ Oid indOid = lfirst_oid(lc);
+ Oid relOid;
+
+ StartTransactionCommand();
+ relOid = IndexGetRelation(indOid, false);
+
+ /*
+ * Finish the index invalidation and set it as dead. It is not
+ * necessary to wait for virtual locks on the parent relation, as this
+ * session already holds sufficient locks on it.
+ */
+ index_concurrent_set_dead(indOid, relOid, NULL);
+
+ /* Commit this transaction to make the update visible. */
+ CommitTransactionCommand();
+ }
+
+ /*
+ * Phase 6 of REINDEX CONCURRENTLY
+ *
+ * Drop the concurrent indexes, which now hold the old index data. This
+ * needs to be done through performDeletion, as otherwise the dependencies
+ * of the old indexes would not be dropped. The internal mechanism of
+ * DROP INDEX CONCURRENTLY is not used, because the indexes are already
+ * considered dead and invalid, so they will not be used by other backends.
+ */
+ foreach(lc, concurrentIndexIds)
+ {
+ Oid indexOid = lfirst_oid(lc);
+
+ /* Start transaction to drop this index */
+ StartTransactionCommand();
+
+ /* Get fresh snapshot for next step */
+ PushActiveSnapshot(GetTransactionSnapshot());
+
+ /*
+ * Perform the drop of this concurrent index, which now holds the old
+ * index data.
+ */
+ index_concurrent_drop(indexOid);
+
+ /*
+ * For the last index to be treated, do not commit transaction yet.
+ * This will be done once all the locks on indexes and parent relations
+ * are released.
+ */
+ if (indexOid != llast_oid(concurrentIndexIds))
+ {
+ /* We can do away with our snapshot */
+ PopActiveSnapshot();
+
+ /* Commit this transaction to make the update visible. */
+ CommitTransactionCommand();
+ }
+ }
+
+ /*
+ * The last thing to do is to release the session-level locks on the parent
+ * tables and their indexes.
+ */
+ foreach(lc, relationLocks)
+ {
+ LockRelId lockRel = * (LockRelId *) lfirst(lc);
+ UnlockRelationIdForSession(&lockRel, ShareUpdateExclusiveLock);
+ }
+
+ return true;
+}
+
+
+/*
* CheckMutability
* Test whether given expression is mutable
*/
@@ -1535,7 +1952,8 @@ ChooseRelationName(const char *name1, const char *name2,
static char *
ChooseIndexName(const char *tabname, Oid namespaceId,
List *colnames, List *exclusionOpNames,
- bool primary, bool isconstraint)
+ bool primary, bool isconstraint,
+ bool concurrent)
{
char *indexname;
@@ -1561,6 +1979,13 @@ ChooseIndexName(const char *tabname, Oid namespaceId,
"key",
namespaceId);
}
+ else if (concurrent)
+ {
+ indexname = ChooseRelationName(tabname,
+ NULL,
+ "cct",
+ namespaceId);
+ }
else
{
indexname = ChooseRelationName(tabname,
@@ -1673,18 +2098,22 @@ ChooseIndexColumnNames(List *indexElems)
* Recreate a specific index.
*/
Oid
-ReindexIndex(RangeVar *indexRelation)
+ReindexIndex(RangeVar *indexRelation, bool concurrent)
{
Oid indOid;
Oid heapOid = InvalidOid;
- /* lock level used here should match index lock reindex_index() */
- indOid = RangeVarGetRelidExtended(indexRelation, AccessExclusiveLock,
- false, false,
- RangeVarCallbackForReindexIndex,
- (void *) &heapOid);
+ indOid = RangeVarGetRelidExtended(indexRelation,
+ concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock,
+ false, false,
+ RangeVarCallbackForReindexIndex,
+ (void *) &heapOid);
- reindex_index(indOid, false);
+ /* Continue process for concurrent or non-concurrent case */
+ if (!concurrent)
+ reindex_index(indOid, false);
+ else
+ ReindexRelationConcurrently(indOid);
return indOid;
}
@@ -1748,18 +2177,33 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
}
}
+
/*
* ReindexTable
* Recreate all indexes of a table (and of its toast table, if any)
*/
Oid
-ReindexTable(RangeVar *relation)
+ReindexTable(RangeVar *relation, bool concurrent)
{
Oid heapOid;
/* The lock level used here should match reindex_relation(). */
- heapOid = RangeVarGetRelidExtended(relation, ShareLock, false, false,
- RangeVarCallbackOwnsTable, NULL);
+ heapOid = RangeVarGetRelidExtended(relation,
+ concurrent ? ShareUpdateExclusiveLock : ShareLock,
+ false, false,
+ RangeVarCallbackOwnsTable, NULL);
+
+ /* Run through the concurrent process if necessary */
+ if (concurrent)
+ {
+ if (!ReindexRelationConcurrently(heapOid))
+ {
+ ereport(NOTICE,
+ (errmsg("table \"%s\" has no indexes",
+ relation->relname)));
+ }
+ return heapOid;
+ }
if (!reindex_relation(heapOid, REINDEX_REL_PROCESS_TOAST))
ereport(NOTICE,
@@ -1778,7 +2222,10 @@ ReindexTable(RangeVar *relation)
* That means this must not be called within a user transaction block!
*/
Oid
-ReindexDatabase(const char *databaseName, bool do_system, bool do_user)
+ReindexDatabase(const char *databaseName,
+ bool do_system,
+ bool do_user,
+ bool concurrent)
{
Relation relationRelation;
HeapScanDesc scan;
@@ -1790,6 +2237,15 @@ ReindexDatabase(const char *databaseName, bool do_system, bool do_user)
AssertArg(databaseName);
+ /*
+ * A CONCURRENTLY operation is not allowed for REINDEX SYSTEM, but it is
+ * allowed for REINDEX DATABASE.
+ */
+ if (concurrent && !do_user)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot reindex system concurrently")));
+
if (strcmp(databaseName, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1873,15 +2329,40 @@ ReindexDatabase(const char *databaseName, bool do_system, bool do_user)
foreach(l, relids)
{
Oid relid = lfirst_oid(l);
+ bool result = false;
+ bool process_concurrent;
StartTransactionCommand();
/* functions in indexes may want a snapshot set */
PushActiveSnapshot(GetTransactionSnapshot());
- if (reindex_relation(relid, REINDEX_REL_PROCESS_TOAST))
+
+ /* Determine if relation needs to be processed concurrently */
+ process_concurrent = concurrent &&
+ !IsSystemNamespace(get_rel_namespace(relid));
+
+ /*
+ * Reindex the relation with a concurrent or non-concurrent process.
+ * System relations cannot be reindexed concurrently, but they still need
+ * to be reindexed (including pg_class) with the normal process, as they
+ * could be corrupted and the concurrent process itself relies on them.
+ * This does not include toast relations, which are
+ * reindexed when their parent relation is processed.
+ */
+ if (process_concurrent)
+ {
+ old = MemoryContextSwitchTo(private_context);
+ result = ReindexRelationConcurrently(relid);
+ MemoryContextSwitchTo(old);
+ }
+ else
+ result = reindex_relation(relid, REINDEX_REL_PROCESS_TOAST);
+
+ if (result)
ereport(NOTICE,
- (errmsg("table \"%s.%s\" was reindexed",
+ (errmsg("table \"%s.%s\" was reindexed%s",
get_namespace_name(get_rel_namespace(relid)),
- get_rel_name(relid))));
+ get_rel_name(relid),
+ process_concurrent ? " concurrently" : "")));
PopActiveSnapshot();
CommitTransactionCommand();
}
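Based on the notice format added in the hunk above, a hedged illustration of how REINDEX DATABASE with CONCURRENTLY would report the two code paths (public.my_table is hypothetical; pg_catalog.pg_class goes through the non-concurrent path, and the exact output may differ):

  REINDEX DATABASE CONCURRENTLY my_db;
  -- NOTICE:  table "pg_catalog.pg_class" was reindexed
  -- NOTICE:  table "public.my_table" was reindexed concurrently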
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 0d6f5c0..0bd67a2 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -904,6 +904,36 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
if (classform->relkind != relkind)
DropErrorMsgWrongType(rel->relname, classform->relkind, relkind);
+ /*
+ * Check for the case of a system index that might have been invalidated
+ * by a failed concurrent operation, and allow it to be dropped.
+ */
+ if (IsSystemClass(classform) &&
+ relkind == RELKIND_INDEX)
+ {
+ HeapTuple locTuple;
+ Form_pg_index indexform;
+ bool indisvalid;
+
+ locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
+ if (!HeapTupleIsValid(locTuple))
+ {
+ ReleaseSysCache(tuple);
+ return;
+ }
+
+ indexform = (Form_pg_index) GETSTRUCT(locTuple);
+ indisvalid = indexform->indisvalid;
+ ReleaseSysCache(locTuple);
+
+ /* Leave if index entry is not valid */
+ if (!indisvalid)
+ {
+ ReleaseSysCache(tuple);
+ return;
+ }
+ }
+
/* Allow DROP to either table owner or schema owner */
if (!pg_class_ownercheck(relOid, GetUserId()) &&
!pg_namespace_ownercheck(classform->relnamespace, GetUserId()))
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 11be62e..c46bdcc 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -1185,6 +1185,20 @@ check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
}
/*
+ * An invalid index only exists when it was created in a concurrent
+ * context. Since CREATE INDEX CONCURRENTLY is not available for
+ * exclusion constraints, this code path can only be reached by REINDEX
+ * CONCURRENTLY. In that case a twin of this index exists in parallel, so
+ * we can bypass this check here; it has already been done on the other
+ * index. If exclusion constraints become supported by CREATE INDEX
+ * CONCURRENTLY in the future, this will need to be revisited.
+ */
+ if (!index->rd_index->indisvalid)
+ return true;
+
+ /*
* Search the tuples that are in the index for any violations, including
* tuples that aren't visible yet.
*/
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 867b0c0..b93d90c 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -3617,6 +3617,7 @@ _copyReindexStmt(const ReindexStmt *from)
COPY_STRING_FIELD(name);
COPY_SCALAR_FIELD(do_system);
COPY_SCALAR_FIELD(do_user);
+ COPY_SCALAR_FIELD(concurrent);
return newnode;
}
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 085cd5b..2687bf0 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1853,6 +1853,7 @@ _equalReindexStmt(const ReindexStmt *a, const ReindexStmt *b)
COMPARE_STRING_FIELD(name);
COMPARE_SCALAR_FIELD(do_system);
COMPARE_SCALAR_FIELD(do_user);
+ COMPARE_SCALAR_FIELD(concurrent);
return true;
}
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 0787d2f..f087219 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -6806,29 +6806,32 @@ opt_if_exists: IF_P EXISTS { $$ = TRUE; }
*****************************************************************************/
ReindexStmt:
- REINDEX reindex_type qualified_name opt_force
+ REINDEX reindex_type opt_concurrently qualified_name opt_force
{
ReindexStmt *n = makeNode(ReindexStmt);
n->kind = $2;
- n->relation = $3;
+ n->concurrent = $3;
+ n->relation = $4;
n->name = NULL;
$$ = (Node *)n;
}
- | REINDEX SYSTEM_P name opt_force
+ | REINDEX SYSTEM_P opt_concurrently name opt_force
{
ReindexStmt *n = makeNode(ReindexStmt);
n->kind = OBJECT_DATABASE;
- n->name = $3;
+ n->concurrent = $3;
+ n->name = $4;
n->relation = NULL;
n->do_system = true;
n->do_user = false;
$$ = (Node *)n;
}
- | REINDEX DATABASE name opt_force
+ | REINDEX DATABASE opt_concurrently name opt_force
{
ReindexStmt *n = makeNode(ReindexStmt);
n->kind = OBJECT_DATABASE;
- n->name = $3;
+ n->concurrent = $3;
+ n->name = $4;
n->relation = NULL;
n->do_system = true;
n->do_user = true;
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 4308128..1662a6e 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -2528,6 +2528,152 @@ XidCacheRemoveRunningXids(TransactionId xid,
LWLockRelease(ProcArrayLock);
}
+
+/*
+ * WaitForMultipleVirtualLocks
+ *
+ * Wait until no transaction holds a conflicting lock on any of the given
+ * relations.  To do this, inquire which xacts currently would conflict with
+ * lockmode on the relation referred to by each LOCKTAG -- ie, which ones
+ * have a lock that permits writing the relation.  Then wait for each of
+ * these xacts to commit or abort.
+ *
+ * Note: GetLockConflicts() never reports our own xid, hence we need not
+ * check for that. Also, prepared xacts are not reported, which is fine
+ * since they certainly aren't going to do anything more.
+ */
+void
+WaitForMultipleVirtualLocks(List *locktags, LOCKMODE lockmode)
+{
+ VirtualTransactionId **old_lockholders;
+ int i, count = 0;
+ ListCell *lc;
+
+ /* Leave if no locks to wait for */
+ if (list_length(locktags) == 0)
+ return;
+
+ old_lockholders = (VirtualTransactionId **)
+ palloc(list_length(locktags) * sizeof(VirtualTransactionId *));
+
+ /* Collect the transactions we need to wait on for each relation lock */
+ foreach(lc, locktags)
+ {
+ LOCKTAG *locktag = lfirst(lc);
+ old_lockholders[count++] = GetLockConflicts(locktag, lockmode);
+ }
+
+ /* Finally wait for each transaction to complete */
+ for (i = 0; i < count; i++)
+ {
+ VirtualTransactionId *lockholders = old_lockholders[i];
+
+ while (VirtualTransactionIdIsValid(*lockholders))
+ {
+ VirtualXactLock(*lockholders, true);
+ lockholders++;
+ }
+ }
+
+ pfree(old_lockholders);
+}
+
+
+/*
+ * WaitForVirtualLocks
+ *
+ * Similar to WaitForMultipleVirtualLocks, but for a single lock.
+ */
+void
+WaitForVirtualLocks(LOCKTAG heaplocktag, LOCKMODE lockmode)
+{
+ WaitForMultipleVirtualLocks(list_make1(&heaplocktag), lockmode);
+}
+
+
+/*
+ * WaitForOldSnapshots
+ *
+ * Wait for transactions that might have an older snapshot than the given
+ * one, because it might not contain tuples deleted just before it was taken.
+ * Obtain a list of VXIDs of such transactions, and wait for them
+ * individually.
+ *
+ * We can exclude any running transactions that have xmin > the xmin of
+ * our reference snapshot; their oldest snapshot must be newer than ours.
+ * We can also exclude any transactions that have xmin = zero, since they
+ * evidently have no live snapshot at all (and any one they might be in
+ * process of taking is certainly newer than ours). Transactions in other
+ * DBs can be ignored too, since they'll never even be able to see this
+ * index.
+ *
+ * We can also exclude autovacuum processes and processes running manual
+ * lazy VACUUMs, because they won't be fazed by missing index entries
+ * either. (Manual ANALYZEs, however, can't be excluded because they
+ * might be within transactions that are going to do arbitrary operations
+ * later.)
+ *
+ * Also, GetCurrentVirtualXIDs never reports our own vxid, so we need not
+ * check for that.
+ *
+ * If a process goes idle-in-transaction with xmin zero, we do not need to
+ * wait for it anymore, per the above argument. We do not have the
+ * infrastructure right now to stop waiting if that happens, but we can at
+ * least avoid the folly of waiting when it is idle at the time we would
+ * begin to wait. We do this by repeatedly rechecking the output of
+ * GetCurrentVirtualXIDs. If, during any iteration, a particular vxid
+ * doesn't show up in the output, we know we can forget about it.
+ */
+void
+WaitForOldSnapshots(Snapshot snapshot)
+{
+ int i, n_old_snapshots;
+ VirtualTransactionId *old_snapshots;
+
+ old_snapshots = GetCurrentVirtualXIDs(snapshot->xmin, true, false,
+ PROC_IS_AUTOVACUUM | PROC_IN_VACUUM,
+ &n_old_snapshots);
+
+ for (i = 0; i < n_old_snapshots; i++)
+ {
+ if (!VirtualTransactionIdIsValid(old_snapshots[i]))
+ continue; /* found uninteresting in previous cycle */
+
+ if (i > 0)
+ {
+ /* see if anything's changed ... */
+ VirtualTransactionId *newer_snapshots;
+ int n_newer_snapshots, j, k;
+
+ newer_snapshots = GetCurrentVirtualXIDs(snapshot->xmin,
+ true, false,
+ PROC_IS_AUTOVACUUM | PROC_IN_VACUUM,
+ &n_newer_snapshots);
+ for (j = i; j < n_old_snapshots; j++)
+ {
+ if (!VirtualTransactionIdIsValid(old_snapshots[j]))
+ continue; /* found uninteresting in previous cycle */
+ for (k = 0; k < n_newer_snapshots; k++)
+ {
+ if (VirtualTransactionIdEquals(old_snapshots[j],
+ newer_snapshots[k]))
+ break;
+ }
+ if (k >= n_newer_snapshots) /* not there anymore */
+ SetInvalidVirtualTransactionId(old_snapshots[j]);
+ }
+ pfree(newer_snapshots);
+ }
+
+ if (VirtualTransactionIdIsValid(old_snapshots[i]))
+ VirtualXactLock(old_snapshots[i], true);
+ }
+}
+
+
#ifdef XIDCACHE_DEBUG
/*
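
For reference, the sketch below shows how a backend caller might chain the two
wait primitives added above: build one LOCKTAG per target table, wait out
transactions holding conflicting locks, then wait out transactions whose
snapshots predate a reference snapshot.  The "heapIds" OID list and the lock
mode are hypothetical placeholders for illustration; the list, lock and
snapshot calls are existing backend APIs.

	/*
	 * Hypothetical driver code: "heapIds" stands for a list of table OIDs
	 * in the current database, built elsewhere with lappend_oid().
	 */
	List	   *locktags = NIL;
	ListCell   *lc;

	foreach(lc, heapIds)
	{
		LOCKTAG    *tag = (LOCKTAG *) palloc(sizeof(LOCKTAG));

		SET_LOCKTAG_RELATION(*tag, MyDatabaseId, lfirst_oid(lc));
		locktags = lappend(locktags, tag);
	}

	/* Wait for transactions whose locks conflict with ShareLock ... */
	WaitForMultipleVirtualLocks(locktags, ShareLock);

	/* ... then for transactions with snapshots older than ours. */
	WaitForOldSnapshots(GetTransactionSnapshot());
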
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index a1c03f1..6a0341b 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -1292,16 +1292,20 @@ standard_ProcessUtility(Node *parsetree,
{
ReindexStmt *stmt = (ReindexStmt *) parsetree;
+ if (stmt->concurrent)
+ PreventTransactionChain(isTopLevel,
+ "REINDEX CONCURRENTLY");
+
/* we choose to allow this during "read only" transactions */
PreventCommandDuringRecovery("REINDEX");
switch (stmt->kind)
{
case OBJECT_INDEX:
- ReindexIndex(stmt->relation);
+ ReindexIndex(stmt->relation, stmt->concurrent);
break;
case OBJECT_TABLE:
case OBJECT_MATVIEW:
- ReindexTable(stmt->relation);
+ ReindexTable(stmt->relation, stmt->concurrent);
break;
case OBJECT_DATABASE:
@@ -1313,8 +1317,8 @@ standard_ProcessUtility(Node *parsetree,
*/
PreventTransactionChain(isTopLevel,
"REINDEX DATABASE");
- ReindexDatabase(stmt->name,
- stmt->do_system, stmt->do_user);
+ ReindexDatabase(stmt->name, stmt->do_system,
+ stmt->do_user, stmt->concurrent);
break;
default:
elog(ERROR, "unrecognized object type: %d",
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index fb323f7..db2a531 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -60,7 +60,26 @@ extern Oid index_create(Relation heapRelation,
bool allow_system_table_mods,
bool skip_build,
bool concurrent,
- bool is_internal);
+ bool is_internal,
+ bool is_reindex);
+
+extern Oid index_concurrent_create(Relation heapRelation,
+ Oid indOid,
+ char *concurrentName);
+
+extern void index_concurrent_build(Oid heapOid,
+ Oid indexOid,
+ bool isprimary);
+
+extern void index_concurrent_swap(Oid newIndexOid, Oid oldIndexOid);
+
+extern void index_concurrent_set_dead(Oid indexId,
+ Oid heapId,
+ LOCKTAG *locktag);
+
+extern void index_concurrent_clear_valid(Relation heapRelation, Oid indexOid);
+
+extern void index_concurrent_drop(Oid indexOid);
extern void index_constraint_create(Relation heapRelation,
Oid indexRelationId,
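
Read together, these declarations hint at the overall flow of a concurrent
rebuild.  The outline below is only an inference from the names and signatures
above, with placeholder variables; in the actual patch each step runs in its
own transaction, with the wait phases described in the documentation in
between.

	/* Phase 1: register a new "concurrent" index alongside the old one. */
	newIndexOid = index_concurrent_create(heapRel, oldIndexOid, concName);

	/* Phase 2: build the new index (validation happens in a later transaction). */
	index_concurrent_build(heapOid, newIndexOid, isprimary);

	/* Phase 3: swap the old and new indexes. */
	index_concurrent_swap(newIndexOid, oldIndexOid);

	/* Phase 4: once no transaction can still use it, mark the old index dead... */
	index_concurrent_set_dead(oldIndexOid, heapOid, &heaplocktag);

	/* Phase 5: ...and drop it. */
	index_concurrent_drop(oldIndexOid);
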
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index 62515b2..54137c6 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -26,10 +26,11 @@ extern Oid DefineIndex(IndexStmt *stmt,
bool check_rights,
bool skip_build,
bool quiet);
-extern Oid ReindexIndex(RangeVar *indexRelation);
-extern Oid ReindexTable(RangeVar *relation);
+extern Oid ReindexIndex(RangeVar *indexRelation, bool concurrent);
+extern Oid ReindexTable(RangeVar *relation, bool concurrent);
extern Oid ReindexDatabase(const char *databaseName,
- bool do_system, bool do_user);
+ bool do_system, bool do_user, bool concurrent);
+extern bool ReindexRelationConcurrently(Oid relOid);
extern char *makeObjectName(const char *name1, const char *name2,
const char *label);
extern char *ChooseRelationName(const char *name1, const char *name2,
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 2229ef0..bb3ae47 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -2538,6 +2538,7 @@ typedef struct ReindexStmt
const char *name; /* name of database to reindex */
bool do_system; /* include system tables in database case */
bool do_user; /* include user tables in database case */
+ bool concurrent; /* reindex concurrently? */
} ReindexStmt;
/* ----------------------
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index d5fdfea..d4a0981 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -76,4 +76,8 @@ extern void XidCacheRemoveRunningXids(TransactionId xid,
int nxids, const TransactionId *xids,
TransactionId latestXid);
+extern void WaitForMultipleVirtualLocks(List *locktags, LOCKMODE lockmode);
+extern void WaitForVirtualLocks(LOCKTAG heaplocktag, LOCKMODE lockmode);
+extern void WaitForOldSnapshots(Snapshot snapshot);
+
#endif /* PROCARRAY_H */
diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out
index 2ae991e..88ec81a 100644
--- a/src/test/regress/expected/create_index.out
+++ b/src/test/regress/expected/create_index.out
@@ -2721,3 +2721,54 @@ ORDER BY thousand;
1 | 1001
(2 rows)
+--
+-- Check behavior of REINDEX and REINDEX CONCURRENTLY
+--
+CREATE TABLE concur_reindex_tab (c1 int);
+-- REINDEX
+REINDEX TABLE concur_reindex_tab; -- notice
+NOTICE: table "concur_reindex_tab" has no indexes
+REINDEX TABLE CONCURRENTLY concur_reindex_tab; -- notice
+NOTICE: table "concur_reindex_tab" has no indexes
+ALTER TABLE concur_reindex_tab ADD COLUMN c2 text; -- add toast index
+-- Normal index with integer column
+CREATE UNIQUE INDEX concur_reindex_ind1 ON concur_reindex_tab(c1);
+-- Normal index with text column
+CREATE INDEX concur_reindex_ind2 ON concur_reindex_tab(c2);
+-- UNIQUE index with expression
+CREATE UNIQUE INDEX concur_reindex_ind3 ON concur_reindex_tab(abs(c1));
+-- Duplicate column names
+CREATE INDEX concur_reindex_ind4 ON concur_reindex_tab(c1, c1, c2);
+-- Create table to check that foreign key dependencies are switched when indexes are swapped
+ALTER TABLE concur_reindex_tab ADD PRIMARY KEY USING INDEX concur_reindex_ind1;
+CREATE TABLE concur_reindex_tab2 (c1 int REFERENCES concur_reindex_tab);
+INSERT INTO concur_reindex_tab VALUES (1, 'a');
+INSERT INTO concur_reindex_tab VALUES (2, 'a');
+REINDEX INDEX CONCURRENTLY concur_reindex_ind1;
+REINDEX TABLE CONCURRENTLY concur_reindex_tab;
+-- Check errors
+-- Cannot run inside a transaction block
+BEGIN;
+REINDEX TABLE CONCURRENTLY concur_reindex_tab;
+ERROR: REINDEX CONCURRENTLY cannot run inside a transaction block
+COMMIT;
+REINDEX TABLE CONCURRENTLY pg_database; -- shared relation, not allowed
+ERROR: concurrent reindex is not supported for shared relations
+REINDEX SYSTEM CONCURRENTLY postgres; -- not allowed for SYSTEM
+ERROR: cannot reindex system concurrently
+-- Check the relation status, there should not be invalid indexes
+\d concur_reindex_tab
+Table "public.concur_reindex_tab"
+ Column | Type | Modifiers
+--------+---------+-----------
+ c1 | integer | not null
+ c2 | text |
+Indexes:
+ "concur_reindex_ind1" PRIMARY KEY, btree (c1)
+ "concur_reindex_ind3" UNIQUE, btree (abs(c1))
+ "concur_reindex_ind2" btree (c2)
+ "concur_reindex_ind4" btree (c1, c1, c2)
+Referenced by:
+ TABLE "concur_reindex_tab2" CONSTRAINT "concur_reindex_tab2_c1_fkey" FOREIGN KEY (c1) REFERENCES concur_reindex_tab(c1)
+
+DROP TABLE concur_reindex_tab, concur_reindex_tab2;
diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql
index 914e7a5..a0b2ae2 100644
--- a/src/test/regress/sql/create_index.sql
+++ b/src/test/regress/sql/create_index.sql
@@ -912,3 +912,39 @@ ORDER BY thousand;
SELECT thousand, tenthous FROM tenk1
WHERE thousand < 2 AND tenthous IN (1001,3000)
ORDER BY thousand;
+
+--
+-- Check behavior of REINDEX and REINDEX CONCURRENTLY
+--
+CREATE TABLE concur_reindex_tab (c1 int);
+-- REINDEX
+REINDEX TABLE concur_reindex_tab; -- notice
+REINDEX TABLE CONCURRENTLY concur_reindex_tab; -- notice
+ALTER TABLE concur_reindex_tab ADD COLUMN c2 text; -- add toast index
+-- Normal index with integer column
+CREATE UNIQUE INDEX concur_reindex_ind1 ON concur_reindex_tab(c1);
+-- Normal index with text column
+CREATE INDEX concur_reindex_ind2 ON concur_reindex_tab(c2);
+-- UNIQUE index with expression
+CREATE UNIQUE INDEX concur_reindex_ind3 ON concur_reindex_tab(abs(c1));
+-- Duplicate column names
+CREATE INDEX concur_reindex_ind4 ON concur_reindex_tab(c1, c1, c2);
+-- Create table to check that foreign key dependencies are switched when indexes are swapped
+ALTER TABLE concur_reindex_tab ADD PRIMARY KEY USING INDEX concur_reindex_ind1;
+CREATE TABLE concur_reindex_tab2 (c1 int REFERENCES concur_reindex_tab);
+INSERT INTO concur_reindex_tab VALUES (1, 'a');
+INSERT INTO concur_reindex_tab VALUES (2, 'a');
+REINDEX INDEX CONCURRENTLY concur_reindex_ind1;
+REINDEX TABLE CONCURRENTLY concur_reindex_tab;
+
+-- Check errors
+-- Cannot run inside a transaction block
+BEGIN;
+REINDEX TABLE CONCURRENTLY concur_reindex_tab;
+COMMIT;
+REINDEX TABLE CONCURRENTLY pg_database; -- shared relation, not allowed
+REINDEX SYSTEM CONCURRENTLY postgres; -- not allowed for SYSTEM
+
+-- Check the relation status, there should not be invalid indexes
+\d concur_reindex_tab
+DROP TABLE concur_reindex_tab, concur_reindex_tab2;