From 9ef3944f8b6d6d5972a10250e6382b2b4b11d96e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 14 Apr 2017 14:55:16 +0300 Subject: [PATCH 001/528] Add new files for PartitionUpdate --- Makefile | 2 +- src/hooks.c | 3 ++ src/include/partition_update.h | 0 src/partition_filter.c | 1 - src/partition_update.c | 15 +++++++++ src/planner_tree_modification.c | 58 ++++++++++++++++++++++++++++++++- 6 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 src/include/partition_update.h create mode 100644 src/partition_update.c diff --git a/Makefile b/Makefile index ece73c45..6d7d56a4 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o $(WIN32RES) + src/compat/rowmarks_fix.o src/partition_update.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include diff --git a/src/hooks.c b/src/hooks.c index ae214eeb..b4d5d3a8 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -509,6 +509,9 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); + /* Add PartitionFilter node for UPDATE queries */ + ExecuteForPlanTree(result, add_partition_update_nodes); + /* Decrement relation tags refcount */ decr_refcount_relation_tags(); diff --git a/src/include/partition_update.h b/src/include/partition_update.h new file mode 100644 index 00000000..e69de29b diff --git a/src/partition_filter.c b/src/partition_filter.c index 8fa09d88..db21c110 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -718,7 +718,6 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) return result_tlist; } - /* * ---------------------------------------------- * Additional init steps for ResultPartsStorage diff --git a/src/partition_update.c b/src/partition_update.c new file mode 100644 index 00000000..b24829c9 --- /dev/null +++ b/src/partition_update.c @@ -0,0 +1,15 @@ + +/* + * -------------------------------- + * PartitionUpdate implementation + * -------------------------------- + */ + +Plan * +make_partition_update(Plan *subplan, Oid parent_relid, + OnConflictAction conflict_action, + List *returning_list) + +{ +} + diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ff18611d..a475165e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -339,7 +339,7 @@ handle_modification_query(Query *parse) /* * ------------------------------- - * PartitionFilter-related stuff + * PartitionFilter and PartitionUpdate-related stuff * ------------------------------- */ @@ -351,6 +351,14 @@ add_partition_filters(List *rtable, Plan *plan) plan_tree_walker(plan, partition_filter_visitor, rtable); } +/* Add PartitionUpdate nodes to the plan tree */ +void +add_partition_update_nodes(List *rtable, Plan *plan) +{ + if (pg_pathman_enable_partition_updaters) + plan_tree_walker(plan, partition_update_visitor, rtable); +} + /* * Add partition filters to ModifyTable node's children. * @@ -399,6 +407,54 @@ partition_filter_visitor(Plan *plan, void *context) } +/* + * Add partition updaters to ModifyTable node's children. + * + * 'context' should point to the PlannedStmt->rtable. 
+ */ +static void +partition_update_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; + ListCell *lc1, + *lc2, + *lc3; + + /* Skip if not ModifyTable with 'INSERT' command */ + if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) + return; + + Assert(rtable && IsA(rtable, List)); + + lc3 = list_head(modify_table->returningLists); + forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) + { + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable); + const PartRelationInfo *prel = get_pathman_relation_info(relid); + + /* Check that table is partitioned */ + if (prel) + { + List *returning_list = NIL; + + /* Extract returning list if possible */ + if (lc3) + { + returning_list = lfirst(lc3); + lc3 = lnext(lc3); + } + + lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), + relid, + modify_table->onConflictAction, + returning_list); + } + } +} + + /* * ----------------------------------------------- * Parenthood safety checks (SELECT * FROM ONLY) From b5e479d9acdd83b16c291c673b006acdd9adf9bd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 18 Apr 2017 14:04:21 +0300 Subject: [PATCH 002/528] Add base methods for PartitionUpdate --- src/include/partition_update.h | 55 +++++++++ src/include/planner_tree_modification.h | 1 + src/partition_filter.c | 35 +----- src/partition_update.c | 145 ++++++++++++++++++++++-- src/planner_tree_modification.c | 13 +-- 5 files changed, 206 insertions(+), 43 deletions(-) diff --git a/src/include/partition_update.h b/src/include/partition_update.h index e69de29b..b9607c5c 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -0,0 +1,55 @@ +/* ------------------------------------------------------------------------ + * + * partition_update.h + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_UPDATE_H +#define PARTITION_UPDATE_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + +typedef struct PartitionUpdateState +{ + CustomScanState css; + + Oid partitioned_table; + List *returning_list; + ModifyTableState *parent_state; + Plan *subplan; /* proxy variable to store subplan */ +} PartitionUpdateState; + +extern bool pg_pathman_enable_partition_update; + +extern CustomScanMethods partition_update_plan_methods; +extern CustomExecMethods partition_update_exec_methods; + +void init_partition_update_static_data(void); +Node *partition_update_create_scan_state(CustomScan *node); + +void partition_update_begin(CustomScanState *node, EState *estate, int eflags); +void partition_update_end(CustomScanState *node); +void partition_update_rescan(CustomScanState *node); +void partition_update_explain(CustomScanState *node, List *ancestors, + ExplainState *es); + +TupleTableSlot *partition_update_exec(CustomScanState *node); + +Plan *make_partition_update(Plan *subplan, + Oid parent_relid, + List *returning_list); + +#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 17e17fb4..8b4a480f 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -34,6 +34,7 @@ void 
pathman_transform_query(Query *parse); /* These functions scribble on Plan tree */ void add_partition_filters(List *rtable, Plan *plan); +void add_partition_update_nodes(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ diff --git a/src/partition_filter.c b/src/partition_filter.c index db21c110..d7657c2b 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -677,35 +677,12 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) Expr *col_expr; Form_pg_attribute attr; - /* Make sure that this attribute exists */ - if (i > RelationGetDescr(parent_rel)->natts) - elog(ERROR, "error in function " CppAsString(pfilter_build_tlist)); - - /* Fetch pg_attribute entry for this column */ - attr = RelationGetDescr(parent_rel)->attrs[i - 1]; - - /* If this column is dropped, create a placeholder Const */ - if (attr->attisdropped) - { - /* Insert NULL for dropped column */ - col_expr = (Expr *) makeConst(INT4OID, - -1, - InvalidOid, - sizeof(int32), - (Datum) 0, - true, - true); - } - /* Otherwise we should create a Var referencing subplan's output */ - else - { - col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - } + col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ + i, /* direct attribute mapping */ + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); result_tlist = lappend(result_tlist, makeTargetEntry(col_expr, diff --git a/src/partition_update.c b/src/partition_update.c index b24829c9..01ecd940 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -1,15 +1,146 @@ - -/* - * -------------------------------- - * PartitionUpdate implementation - * -------------------------------- +/* ------------------------------------------------------------------------ + * + * partition_update.c + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ */ +#include "partition_filter.h" +#include "partition_update.h" + +#include "utils/guc.h" + +bool pg_pathman_enable_partition_update = true; + +CustomScanMethods partition_update_plan_methods; +CustomExecMethods partition_update_exec_methods; + + +void +init_partition_update_static_data(void) +{ + partition_update_plan_methods.CustomName = "PartitionUpdate"; + partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state; + + partition_update_exec_methods.CustomName = "PartitionUpdate"; + partition_update_exec_methods.BeginCustomScan = partition_update_begin; + partition_update_exec_methods.ExecCustomScan = partition_update_exec; + partition_update_exec_methods.EndCustomScan = partition_update_end; + partition_update_exec_methods.ReScanCustomScan = partition_update_rescan; + partition_update_exec_methods.MarkPosCustomScan = NULL; + partition_update_exec_methods.RestrPosCustomScan = NULL; + partition_update_exec_methods.ExplainCustomScan = partition_update_explain; + + DefineCustomBoolVariable("pg_pathman.enable_partitionupdate", + "Enables the planner's use of PartitionUpdate custom node.", + NULL, + &pg_pathman_enable_partition_update, + true, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); +} + + Plan * -make_partition_update(Plan *subplan, Oid parent_relid, - OnConflictAction conflict_action, 
+make_partition_update(Plan *subplan, + Oid parent_relid, List *returning_list) { + Plan *pfilter; + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods and child plan */ + cscan->methods = &partition_update_plan_methods; + pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, + returning_list); + cscan->custom_plans = list_make1(pfilter); + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + cscan->custom_scan_tlist = subplan->targetlist; + cscan->custom_private = NULL; + + return &cscan->scan.plan; } +Node * +partition_update_create_scan_state(CustomScan *node) +{ + PartitionUpdateState *state; + + state = (PartitionUpdateState *) palloc0(sizeof(PartitionUpdateState)); + NodeSetTag(state, T_CustomScanState); + + state->css.flags = node->flags; + state->css.methods = &partition_update_exec_methods; + + /* Extract necessary variables */ + state->subplan = (Plan *) linitial(node->custom_plans); + return (Node *) state; +} + +void +partition_update_begin(CustomScanState *node, EState *estate, int eflags) +{ + PartitionUpdateState *state = (PartitionUpdateState *) node; + + /* Initialize PartitionFilter child node */ + node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); +} + +TupleTableSlot * +partition_update_exec(CustomScanState *node) +{ + PartitionFilterState *state = (PartitionFilterState *) node; + PlanState *child_ps = (PlanState *) linitial(node->custom_ps); + EState *estate = node->ss.ps.state; + TupleTableSlot *slot; + ResultRelInfo *saved_rel_info; + + /* save original ResultRelInfo */ + saved_rel_info = estate->es_result_relation_info; + + slot = ExecProcNode(child_ps); + if (!TupIsNull(slot)) + { + /* we got the slot that can be inserted to child partition */ + return slot; + } + + return NULL; +} + +void +partition_update_end(CustomScanState *node) +{ + PartitionUpdateState *state = (PartitionUpdateState *) node; + + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); +} + +void +partition_update_rescan(CustomScanState *node) +{ + Assert(list_length(node->custom_ps) == 1); + ExecReScan((PlanState *) linitial(node->custom_ps)); +} + +void +partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *es) +{ + /* Nothing to do here now */ +} diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index a475165e..5f6a9b4a 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -14,6 +14,7 @@ #include "nodes_common.h" #include "partition_filter.h" +#include "partition_update.h" #include "planner_tree_modification.h" #include "miscadmin.h" @@ -32,6 +33,7 @@ static void disable_standard_inheritance(Query *parse); static void handle_modification_query(Query *parse); static void partition_filter_visitor(Plan *plan, void *context); +static void partition_update_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); @@ -249,9 +251,7 @@ handle_modification_query(Query *parse) result_rel = parse->resultRelation; /* Exit if it's not a DELETE or UPDATE query */ - if (result_rel == 0 || - (parse->commandType != CMD_UPDATE && - parse->commandType != CMD_DELETE)) + if (result_rel == 0 || parse->commandType != 
CMD_DELETE) return; rte = rt_fetch(result_rel, parse->rtable); @@ -355,7 +355,7 @@ add_partition_filters(List *rtable, Plan *plan) void add_partition_update_nodes(List *rtable, Plan *plan) { - if (pg_pathman_enable_partition_updaters) + if (pg_pathman_enable_partition_update) plan_tree_walker(plan, partition_update_visitor, rtable); } @@ -408,7 +408,7 @@ partition_filter_visitor(Plan *plan, void *context) /* - * Add partition updaters to ModifyTable node's children. + * Add partition update to ModifyTable node's children. * * 'context' should point to the PlannedStmt->rtable. */ @@ -421,7 +421,7 @@ partition_update_visitor(Plan *plan, void *context) *lc2, *lc3; - /* Skip if not ModifyTable with 'INSERT' command */ + /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) return; @@ -448,7 +448,6 @@ partition_update_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, - modify_table->onConflictAction, returning_list); } } From f684b8d059d56674fb1c7649ed1b23a708928f03 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Apr 2017 16:21:24 +0300 Subject: [PATCH 003/528] Try another targetlist generation --- src/partition_filter.c | 28 +++++++++++++--------------- src/partition_update.c | 1 + src/pg_pathman.c | 2 ++ 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index d7657c2b..802bae4d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -669,27 +669,25 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) { List *result_tlist = NIL; ListCell *lc; - int i = 1; foreach (lc, tlist) { TargetEntry *tle = (TargetEntry *) lfirst(lc); - Expr *col_expr; + TargetEntry *newtle; Form_pg_attribute attr; - col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - - result_tlist = lappend(result_tlist, - makeTargetEntry(col_expr, - i, - NULL, - tle->resjunk)); - i++; /* next resno */ + if (tle->expr != NULL && IsA(tle->expr, Var)) + { + Var *var = (Var *) palloc(sizeof(Var)); + *var = *((Var *)(tle->expr)); + var->varno = INDEX_VAR; + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } + else + newtle = copyObject(tle); + + result_tlist = lappend(result_tlist, newtle); } return result_tlist; diff --git a/src/partition_update.c b/src/partition_update.c index 01ecd940..cd8cd73b 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -67,6 +67,7 @@ make_partition_update(Plan *subplan, pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, returning_list); cscan->custom_plans = list_make1(pfilter); + cscan->scan.plan.targetlist = pfilter->targetlist; /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 8c99b75a..7061fe9f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -16,6 +16,7 @@ #include "hooks.h" #include "pathman.h" #include "partition_filter.h" +#include "partition_update.h" #include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" @@ -164,6 +165,7 @@ _PG_init(void) init_runtimeappend_static_data(); init_runtime_merge_append_static_data(); init_partition_filter_static_data(); + init_partition_update_static_data(); } /* Get cached PATHMAN_CONFIG relation Oid */ 
From 4d692b78444c018a47b04dbcb85c7f2b1c9e257a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Apr 2017 19:18:01 +0300 Subject: [PATCH 004/528] Fix plan on child partition --- src/hooks.c | 2 +- src/planner_tree_modification.c | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index b4d5d3a8..00e8ff37 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -509,7 +509,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Add PartitionFilter node for UPDATE queries */ + /* Add PartitionUpdate node for UPDATE queries */ ExecuteForPlanTree(result, add_partition_update_nodes); /* Decrement relation tags refcount */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 5f6a9b4a..ee92605d 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -16,6 +16,7 @@ #include "partition_filter.h" #include "partition_update.h" #include "planner_tree_modification.h" +#include "relation_info.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -251,7 +252,8 @@ handle_modification_query(Query *parse) result_rel = parse->resultRelation; /* Exit if it's not a DELETE or UPDATE query */ - if (result_rel == 0 || parse->commandType != CMD_DELETE) + if (result_rel == 0 || (parse->commandType != CMD_UPDATE && + parse->commandType != CMD_DELETE)) return; rte = rt_fetch(result_rel, parse->rtable); @@ -430,10 +432,24 @@ partition_update_visitor(Plan *plan, void *context) lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { + Oid parent_relid; Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable); const PartRelationInfo *prel = get_pathman_relation_info(relid); + /* query can be changed earlier to point on child partition, + * so we're possibly now looking at query that updates child partition + */ + if (prel == NULL) + { + parent_relid = get_parent_of_partition(relid, NULL); + if (parent_relid) + { + prel = get_pathman_relation_info(parent_relid); + relid = parent_relid; + } + } + /* Check that table is partitioned */ if (prel) { From 0bd4bc0134ee0581f61164eafffa9214b244872f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 20 Apr 2017 12:14:06 +0300 Subject: [PATCH 005/528] Fix target list generation for INSERTs --- src/partition_filter.c | 13 +++++++++++-- src/partition_update.c | 4 ++-- src/planner_tree_modification.c | 2 ++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 802bae4d..2f88ac09 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -674,7 +674,6 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) { TargetEntry *tle = (TargetEntry *) lfirst(lc); TargetEntry *newtle; - Form_pg_attribute attr; if (tle->expr != NULL && IsA(tle->expr, Var)) { @@ -685,7 +684,17 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) tle->resjunk); } else - newtle = copyObject(tle); + { + Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } result_tlist = lappend(result_tlist, newtle); } diff --git a/src/partition_update.c b/src/partition_update.c index cd8cd73b..314ce7d4 100644 --- 
a/src/partition_update.c +++ b/src/partition_update.c @@ -22,10 +22,10 @@ CustomExecMethods partition_update_exec_methods; void init_partition_update_static_data(void) { - partition_update_plan_methods.CustomName = "PartitionUpdate"; + partition_update_plan_methods.CustomName = "PrepareInsert"; partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state; - partition_update_exec_methods.CustomName = "PartitionUpdate"; + partition_update_exec_methods.CustomName = "PrepareInsert"; partition_update_exec_methods.BeginCustomScan = partition_update_begin; partition_update_exec_methods.ExecCustomScan = partition_update_exec; partition_update_exec_methods.EndCustomScan = partition_update_end; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ee92605d..d4558c4b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -455,6 +455,8 @@ partition_update_visitor(Plan *plan, void *context) { List *returning_list = NIL; + modify_table->operation = CMD_INSERT; + /* Extract returning list if possible */ if (lc3) { From 9811d4c5b9553f37a11ba24cae31aefbeb6c6500 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 20 Apr 2017 18:49:24 +0300 Subject: [PATCH 006/528] Add DELETE support before INSERT --- src/partition_update.c | 127 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 124 insertions(+), 3 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index 314ce7d4..200badbb 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -4,6 +4,8 @@ * Insert row to right partition in UPDATE operation * * Copyright (c) 2017, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ @@ -11,6 +13,8 @@ #include "partition_filter.h" #include "partition_update.h" +#include "access/xact.h" +#include "executor/nodeModifyTable.h" #include "utils/guc.h" bool pg_pathman_enable_partition_update = true; @@ -18,6 +22,8 @@ bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; +static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, HeapTuple oldtuple, + EPQState *epqstate, EState *estate); void init_partition_update_static_data(void) @@ -105,7 +111,6 @@ partition_update_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_update_exec(CustomScanState *node) { - PartitionFilterState *state = (PartitionFilterState *) node; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); EState *estate = node->ss.ps.state; TupleTableSlot *slot; @@ -114,9 +119,50 @@ partition_update_exec(CustomScanState *node) /* save original ResultRelInfo */ saved_rel_info = estate->es_result_relation_info; + /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); + if (!TupIsNull(slot)) { + char relkind; + Datum datum; + bool isNull; + ResultRelInfo *resultRelInfo; + HeapTuple oldtuple; + ItemPointer tupleid; + ItemPointerData tuple_ctid; + JunkFilter *junkfilter; + EPQState epqstate; + AttrNumber ctid_attno; + + resultRelInfo = estate->es_result_relation_info; + junkfilter = resultRelInfo->ri_junkFilter; + Assert(junkfilter != NULL); + + EvalPlanQualSetSlot(&epqstate, slot); + oldtuple = NULL; + + /* + * extract the 'ctid' junk attribute. 
+ */ + relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + Assert(relkind == RELKIND_RELATION); + ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); + datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! */ + tupleid = &tuple_ctid; + + /* delete old tuple */ + estate->es_result_relation_info = saved_rel_info; + ExecDeleteInternal(tupleid, oldtuple, &epqstate, estate); + estate->es_result_relation_info = resultRelInfo; + /* we got the slot that can be inserted to child partition */ return slot; } @@ -127,8 +173,6 @@ partition_update_exec(CustomScanState *node) void partition_update_end(CustomScanState *node) { - PartitionUpdateState *state = (PartitionUpdateState *) node; - Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); } @@ -145,3 +189,80 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e { /* Nothing to do here now */ } + + +/* ---------------------------------------------------------------- + * ExecDeleteInternal + * Basicly copy of ExecDelete from executor/nodeModifyTable.c + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecDeleteInternal(ItemPointer tupleid, + HeapTuple oldtuple, + EPQState *epqstate, + EState *estate) +{ + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + HeapUpdateFailureData hufd; + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd); + switch (result) + { + case HeapTupleSelfUpdated: + if (hufd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Else, already deleted by self; nothing to do */ + return NULL; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + resultRelationDesc, + resultRelInfo->ri_RangeTableIndex, + LockTupleExclusive, + &hufd.ctid, + hufd.xmax); + if (!TupIsNull(epqslot)) + { + *tupleid = hufd.ctid; + goto ldelete; + } + } + /* tuple already deleted; nothing to do */ + return NULL; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return NULL; + } + + return NULL; +} From 5a1eb53d107e62f1f3c890866fade1449edfa61d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Apr 2017 17:05:48 +0300 Subject: [PATCH 007/528] Fix inserts --- src/partition_update.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index 200badbb..a2e3cb24 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -22,8 
+22,7 @@ bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; -static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, HeapTuple oldtuple, - EPQState *epqstate, EState *estate); +static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, EPQState *epqstate, EState *estate); void init_partition_update_static_data(void) @@ -44,7 +43,7 @@ init_partition_update_static_data(void) "Enables the planner's use of PartitionUpdate custom node.", NULL, &pg_pathman_enable_partition_update, - true, + false, PGC_USERSET, 0, NULL, @@ -111,13 +110,8 @@ partition_update_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_update_exec(CustomScanState *node) { - PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - EState *estate = node->ss.ps.state; - TupleTableSlot *slot; - ResultRelInfo *saved_rel_info; - - /* save original ResultRelInfo */ - saved_rel_info = estate->es_result_relation_info; + PlanState *child_ps = (PlanState *) linitial(node->custom_ps); + TupleTableSlot *slot; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); @@ -128,19 +122,21 @@ partition_update_exec(CustomScanState *node) Datum datum; bool isNull; ResultRelInfo *resultRelInfo; - HeapTuple oldtuple; ItemPointer tupleid; ItemPointerData tuple_ctid; JunkFilter *junkfilter; EPQState epqstate; AttrNumber ctid_attno; + PartitionFilterState *child_state = (PartitionFilterState *) child_ps; + EState *estate = node->ss.ps.state; + + resultRelInfo = estate->es_result_relation_info; junkfilter = resultRelInfo->ri_junkFilter; Assert(junkfilter != NULL); EvalPlanQualSetSlot(&epqstate, slot); - oldtuple = NULL; /* * extract the 'ctid' junk attribute. 
@@ -159,11 +155,11 @@ partition_update_exec(CustomScanState *node) tupleid = &tuple_ctid; /* delete old tuple */ - estate->es_result_relation_info = saved_rel_info; - ExecDeleteInternal(tupleid, oldtuple, &epqstate, estate); + estate->es_result_relation_info = child_state->result_parts.saved_rel_info; + ExecDeleteInternal(tupleid, &epqstate, estate); estate->es_result_relation_info = resultRelInfo; - /* we got the slot that can be inserted to child partition */ + /* we've got the slot that can be inserted to child partition */ return slot; } @@ -198,7 +194,6 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - HeapTuple oldtuple, EPQState *epqstate, EState *estate) { From 1999403fa5de6926889a7da6637b9d657c0bf8d5 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Apr 2017 18:47:12 +0300 Subject: [PATCH 008/528] Fix target list for INSERTs on creation stage --- src/partition_filter.c | 55 ++++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 23 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 2f88ac09..2f491830 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -86,7 +86,7 @@ static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); -static List * pfilter_build_tlist(Relation parent_rel, List *tlist); +static List * pfilter_build_tlist(Relation parent_rel, Plan *subplan); static void pf_memcxt_callback(void *arg); static estate_mod_data * fetch_estate_mod_data(EState *estate); @@ -486,7 +486,7 @@ make_partition_filter(Plan *subplan, Oid parent_relid, /* Build an appropriate target list using a cached Relation entry */ parent_rel = RelationIdGetRelation(parent_relid); - cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan->targetlist); + cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan); RelationClose(parent_rel); /* No physical relation will be scanned */ @@ -665,35 +665,44 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e * Build partition filter's target list pointing to subplan tuple's elements. 
*/ static List * -pfilter_build_tlist(Relation parent_rel, List *tlist) +pfilter_build_tlist(Relation parent_rel, Plan *subplan) { List *result_tlist = NIL; ListCell *lc; - foreach (lc, tlist) + foreach (lc, subplan->targetlist) { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - TargetEntry *newtle; + TargetEntry *tle = (TargetEntry *) lfirst(lc), + *newtle = NULL; + + if (IsA(tle->expr, Const)) + newtle = makeTargetEntry(copyObject(tle->expr), tle->resno, tle->resname, + tle->resjunk); - if (tle->expr != NULL && IsA(tle->expr, Var)) - { - Var *var = (Var *) palloc(sizeof(Var)); - *var = *((Var *)(tle->expr)); - var->varno = INDEX_VAR; - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); - } else { - Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ - tle->resno, - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); + if (tle->expr != NULL && IsA(tle->expr, Var)) + { + Var *var = (Var *) palloc(sizeof(Var)); + *var = *((Var *)(tle->expr)); + var->varno = INDEX_VAR; + var->varattno = tle->resno; + + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } + else + { + Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } } result_tlist = lappend(result_tlist, newtle); From c2bb1d8bf3adc047db8f91d3458277d462892ea2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Apr 2017 18:59:16 +0300 Subject: [PATCH 009/528] Fix clang warning --- src/partition_update.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index a2e3cb24..842e1287 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -118,7 +118,6 @@ partition_update_exec(CustomScanState *node) if (!TupIsNull(slot)) { - char relkind; Datum datum; bool isNull; ResultRelInfo *resultRelInfo; @@ -131,7 +130,6 @@ partition_update_exec(CustomScanState *node) PartitionFilterState *child_state = (PartitionFilterState *) child_ps; EState *estate = node->ss.ps.state; - resultRelInfo = estate->es_result_relation_info; junkfilter = resultRelInfo->ri_junkFilter; Assert(junkfilter != NULL); @@ -141,8 +139,7 @@ partition_update_exec(CustomScanState *node) /* * extract the 'ctid' junk attribute. */ - relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - Assert(relkind == RELKIND_RELATION); + Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); /* shouldn't ever get a null result... 
*/ From 76da280eb18cf89ee23fce863ab2d9a062cbd76f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 25 Apr 2017 13:28:06 +0300 Subject: [PATCH 010/528] Add delete support in FDW tables --- src/partition_update.c | 211 ++++++++++++++++++++++++++++------------- 1 file changed, 147 insertions(+), 64 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index 842e1287..fab7f6f2 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -14,7 +14,10 @@ #include "partition_update.h" #include "access/xact.h" +#include "access/htup_details.h" +#include "commands/trigger.h" #include "executor/nodeModifyTable.h" +#include "foreign/fdwapi.h" #include "utils/guc.h" bool pg_pathman_enable_partition_update = true; @@ -22,7 +25,11 @@ bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; -static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, EPQState *epqstate, EState *estate); +static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, + HeapTuple oldtuple, + TupleTableSlot *planSlot, + EPQState *epqstate, + EState *estate); void init_partition_update_static_data(void) @@ -120,12 +127,15 @@ partition_update_exec(CustomScanState *node) { Datum datum; bool isNull; + char relkind; ResultRelInfo *resultRelInfo; - ItemPointer tupleid; + ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; JunkFilter *junkfilter; EPQState epqstate; AttrNumber ctid_attno; + HeapTupleData oldtupdata; + HeapTuple oldtuple; PartitionFilterState *child_state = (PartitionFilterState *) child_ps; EState *estate = node->ss.ps.state; @@ -136,24 +146,59 @@ partition_update_exec(CustomScanState *node) EvalPlanQualSetSlot(&epqstate, slot); - /* - * extract the 'ctid' junk attribute. - */ - Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); - ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); - datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ - tupleid = &tuple_ctid; + oldtuple = NULL; + relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) + { + /* + * extract the 'ctid' junk attribute. + */ + Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); + ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); + datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! */ + tupleid = &tuple_ctid; + } + else if (relkind == RELKIND_FOREIGN_TABLE) + { + if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + { + datum = ExecGetJunkAttribute(slot, + junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "wholerow is NULL"); + + oldtupdata.t_data = DatumGetHeapTupleHeader(datum); + oldtupdata.t_len = + HeapTupleHeaderGetDatumLength(oldtupdata.t_data); + ItemPointerSetInvalid(&(oldtupdata.t_self)); + + /* Historically, view triggers see invalid t_tableOid. 
*/ + oldtupdata.t_tableOid =RelationGetRelid(resultRelInfo->ri_RelationDesc); + + oldtuple = &oldtupdata; + } + } + else + elog(ERROR, "PartitionUpdate supports only relations and foreign tables"); /* delete old tuple */ estate->es_result_relation_info = child_state->result_parts.saved_rel_info; - ExecDeleteInternal(tupleid, &epqstate, estate); + + /* + * We have two cases here: + * normal relations - tupleid points to actual tuple + * foreign tables - tupleid is invalid, slot is required + */ + ExecDeleteInternal(tupleid, oldtuple, slot, &epqstate, estate); estate->es_result_relation_info = resultRelInfo; /* we've got the slot that can be inserted to child partition */ @@ -191,8 +236,10 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - EPQState *epqstate, - EState *estate) + HeapTuple oldtuple, + TupleTableSlot *planSlot, + EPQState *epqstate, + EState *estate) { ResultRelInfo *resultRelInfo; Relation resultRelationDesc; @@ -205,56 +252,92 @@ ExecDeleteInternal(ItemPointer tupleid, resultRelInfo = estate->es_result_relation_info; resultRelationDesc = resultRelInfo->ri_RelationDesc; -ldelete:; - result = heap_delete(resultRelationDesc, tupleid, - estate->es_output_cid, - estate->es_crosscheck_snapshot, - true /* wait for commit */ , - &hufd); - switch (result) + /* BEFORE ROW DELETE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_delete_before_row) { - case HeapTupleSelfUpdated: - if (hufd.cmax != estate->es_output_cid) - ereport(ERROR, - (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), - errmsg("tuple to be updated was already modified by an operation triggered by the current command"), - errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); - - /* Else, already deleted by self; nothing to do */ - return NULL; - - case HeapTupleMayBeUpdated: - break; - - case HeapTupleUpdated: - if (IsolationUsesXactSnapshot()) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); - if (!ItemPointerEquals(tupleid, &hufd.ctid)) - { - TupleTableSlot *epqslot; - - epqslot = EvalPlanQual(estate, - epqstate, - resultRelationDesc, - resultRelInfo->ri_RangeTableIndex, - LockTupleExclusive, - &hufd.ctid, - hufd.xmax); - if (!TupIsNull(epqslot)) + bool dodelete; + + dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, + tupleid, oldtuple); + + if (!dodelete) + elog(ERROR, "In partitioned tables the old row always should be deleted"); + } + + if (resultRelInfo->ri_FdwRoutine) + { + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(resultRelationDesc)); + + /* + * delete from foreign table: let the FDW do it + */ + ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); + resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, + resultRelInfo, + slot, + planSlot); + + /* we don't need slot anymore */ + ExecDropSingleTupleTableSlot(slot); + } + else + { + /* delete the tuple */ +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd); + switch (result) + { + case HeapTupleSelfUpdated: + if (hufd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified by an operation triggered by the current command"), + 
errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Else, already deleted by self; nothing to do */ + return NULL; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) { - *tupleid = hufd.ctid; - goto ldelete; + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + resultRelationDesc, + resultRelInfo->ri_RangeTableIndex, + LockTupleExclusive, + &hufd.ctid, + hufd.xmax); + if (!TupIsNull(epqslot)) + { + *tupleid = hufd.ctid; + goto ldelete; + } } - } - /* tuple already deleted; nothing to do */ - return NULL; + /* tuple already deleted; nothing to do */ + return NULL; - default: - elog(ERROR, "unrecognized heap_delete status: %u", result); - return NULL; + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return NULL; + } } + /* AFTER ROW DELETE Triggers */ + ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple); + return NULL; } From 5d314ae240f2f33252bceebe8cab4914e25ba62c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 26 Apr 2017 19:25:34 +0300 Subject: [PATCH 011/528] Move some logic from PartitionUpdate to PartitionFilter --- Makefile | 1 + expected/pathman_update_node.out | 279 +++++++++++++++++++++++++++++++ sql/pathman_update_node.sql | 162 ++++++++++++++++++ src/include/partition_filter.h | 6 +- src/partition_filter.c | 48 +++++- src/partition_update.c | 19 +-- src/planner_tree_modification.c | 3 +- 7 files changed, 497 insertions(+), 21 deletions(-) create mode 100644 expected/pathman_update_node.out create mode 100644 sql/pathman_update_node.sql diff --git a/Makefile b/Makefile index 6d7d56a4..52c0652b 100644 --- a/Makefile +++ b/Makefile @@ -40,6 +40,7 @@ REGRESS = pathman_basic \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_update_trigger \ + pathman_update_node \ pathman_updates \ pathman_utility_stmt diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out new file mode 100644 index 00000000..e15d04f9 --- /dev/null +++ b/expected/pathman_update_node.out @@ -0,0 +1,279 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_trigger; +SET pg_pathman.enable_partitionupdate=on; +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +NOTICE: sequence "test_range_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val < 10 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_1 | 5 | 1 + test_update_trigger.test_range_1 | 5 | 10 + test_update_trigger.test_range_1 | 5 | 2 + test_update_trigger.test_range_1 | 5 | 3 + test_update_trigger.test_range_1 | 5 | 4 + test_update_trigger.test_range_1 | 5 | 5 + test_update_trigger.test_range_1 | 5 | 6 + test_update_trigger.test_range_1 | 5 | 7 + 
test_update_trigger.test_range_1 | 5 | 8 + test_update_trigger.test_range_1 | 5 | 9 +(10 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_3 | 21 | 11 + test_update_trigger.test_range_3 | 22 | 12 + test_update_trigger.test_range_3 | 23 | 13 + test_update_trigger.test_range_3 | 24 | 14 + test_update_trigger.test_range_3 | 25 | 15 + test_update_trigger.test_range_3 | 26 | 16 + test_update_trigger.test_range_3 | 27 | 17 + test_update_trigger.test_range_3 | 28 | 18 + test_update_trigger.test_range_3 | 29 | 19 + test_update_trigger.test_range_3 | 30 | 20 + test_update_trigger.test_range_3 | 21 | 21 + test_update_trigger.test_range_3 | 22 | 22 + test_update_trigger.test_range_3 | 23 | 23 + test_update_trigger.test_range_3 | 24 | 24 + test_update_trigger.test_range_3 | 25 | 25 + test_update_trigger.test_range_3 | 26 | 26 + test_update_trigger.test_range_3 | 27 | 27 + test_update_trigger.test_range_3 | 28 | 28 + test_update_trigger.test_range_3 | 29 | 29 + test_update_trigger.test_range_3 | 30 | 30 +(20 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Move single row */ +UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 90 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_9 | 90 | 80 + test_update_trigger.test_range_9 | 90 | 90 +(2 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Move single row (create new partition) */ +UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = -1 +ORDER BY comment; + tableoid | val | comment +-----------------------------------+-----+--------- + test_update_trigger.test_range_11 | -1 | 50 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Update non-key column */ +UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 100 +ORDER BY comment; + tableoid | val | comment +-----------------------------------+-----+--------- + test_update_trigger.test_range_10 | 100 | test! 
+(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_trigger.test_range_4; +UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; +ERROR: cannot spawn a partition +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 70 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_7 | 70 | 70 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test trivial move (same key) */ +UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 65 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_7 | 65 | 65 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_trigger.test_range', + 'test_update_trigger.test_range_inv', + 101::NUMERIC, 111::NUMERIC); + attach_range_partition +------------------------------------ + test_update_trigger.test_range_inv +(1 row) + +UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 105 +ORDER BY comment; + tableoid | val | comment +------------------------------------+-----+--------- + test_update_trigger.test_range_inv | 105 | 60 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_trigger.test_range'); + append_range_partition +----------------------------------- + test_update_trigger.test_range_12 +(1 row) + +UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 115; + tableoid | val +-----------------------------------+----- + test_update_trigger.test_range_12 | 115 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* Move all rows into single partition */ +UPDATE test_update_trigger.test_hash SET val = 1; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 1 +ORDER BY comment; + tableoid | val | comment +---------------------------------+-----+--------- + test_update_trigger.test_hash_2 | 1 | 1 + test_update_trigger.test_hash_2 | 1 | 10 + test_update_trigger.test_hash_2 | 1 | 2 + test_update_trigger.test_hash_2 | 1 | 3 + test_update_trigger.test_hash_2 | 1 | 4 + test_update_trigger.test_hash_2 | 1 | 5 + test_update_trigger.test_hash_2 | 1 | 6 + test_update_trigger.test_hash_2 | 1 | 7 + 
test_update_trigger.test_hash_2 | 1 | 8 + test_update_trigger.test_hash_2 | 1 | 9 +(10 rows) + +SELECT count(*) FROM test_update_trigger.test_hash; + count +------- + 10 +(1 row) + +/* Don't move any rows */ +UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 3 +ORDER BY comment; + tableoid | val | comment +----------+-----+--------- +(0 rows) + +SELECT count(*) FROM test_update_trigger.test_hash; + count +------- + 10 +(1 row) + +DROP SCHEMA test_update_trigger CASCADE; +NOTICE: drop cascades to 18 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql new file mode 100644 index 00000000..c3cc8d4d --- /dev/null +++ b/sql/pathman_update_node.sql @@ -0,0 +1,162 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_trigger; +SET pg_pathman.enable_partitionupdate=on; + + +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); + + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val < 10 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Move single row */ +UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; + +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 90 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Move single row (create new partition) */ +UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; + +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = -1 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Update non-key column */ +UPDATE test_update_trigger.test_range SET comment = 'test!' 
WHERE val = 100; + +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 100 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_trigger.test_range_4; +UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; + +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 70 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test trivial move (same key) */ +UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; + +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 65 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_trigger.test_range', + 'test_update_trigger.test_range_inv', + 101::NUMERIC, 111::NUMERIC); +UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; + +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 105 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_trigger.test_range'); +UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; + +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 115; + +SELECT count(*) FROM test_update_trigger.test_range; + + + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); + + +/* Move all rows into single partition */ +UPDATE test_update_trigger.test_hash SET val = 1; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 1 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_hash; + + +/* Don't move any rows */ +UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 3 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_hash; + + + +DROP SCHEMA test_update_trigger CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 893200af..c6792451 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -94,6 +94,9 @@ typedef struct bool warning_triggered; /* warning message counter */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ + ItemPointer ctid; /* ctid of rubuilt tuple + if there any, or NULL */ + bool keep_ctid; /* if false ctid will not filled */ ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; @@ -140,7 +143,8 @@ ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, - List *returning_list); + List *returning_list, + bool keep_ctid); Node * 
partition_filter_create_scan_state(CustomScan *node); diff --git a/src/partition_filter.c b/src/partition_filter.c index 2f491830..94826129 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -463,7 +463,8 @@ select_partition_for_insert(Datum value, Oid value_type, Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, - List *returning_list) + List *returning_list, + bool keep_ctid) { CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; @@ -494,9 +495,10 @@ make_partition_filter(Plan *subplan, Oid parent_relid, cscan->custom_scan_tlist = subplan->targetlist; /* Pack partitioned table's Oid and conflict_action */ - cscan->custom_private = list_make3(makeInteger(parent_relid), + cscan->custom_private = list_make4(makeInteger(parent_relid), makeInteger(conflict_action), - returning_list); + returning_list, + makeInteger((int) keep_ctid)); return &cscan->scan.plan; } @@ -517,6 +519,7 @@ partition_filter_create_scan_state(CustomScan *node) state->partitioned_table = intVal(linitial(node->custom_private)); state->on_conflict_action = intVal(lsecond(node->custom_private)); state->returning_list = lthird(node->custom_private); + state->keep_ctid = (bool) intVal(lfourth(node->custom_private)); /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -556,6 +559,9 @@ partition_filter_exec(CustomScanState *node) PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; + /* clean ctid for old slot */ + state->ctid = NULL; + slot = ExecProcNode(child_ps); /* Save original ResultRelInfo */ @@ -569,6 +575,7 @@ partition_filter_exec(CustomScanState *node) ResultRelInfoHolder *rri_holder; bool isnull; Datum value; + ResultRelInfo *resultRelInfo; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -601,14 +608,45 @@ partition_filter_exec(CustomScanState *node) ResetExprContext(econtext); /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = rri_holder->result_rel_info; + resultRelInfo = rri_holder->result_rel_info; + estate->es_result_relation_info = resultRelInfo; + + if (state->keep_ctid) + { + JunkFilter *junkfilter; + Datum datum; + char relkind; + + /* + * extract `ctid` junk attribute and save it in state, + * we need this step because if there will be conversion + * junk attributes will be removed from slot + */ + junkfilter = resultRelInfo->ri_junkFilter; + Assert(junkfilter != NULL); + + relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) + { + AttrNumber ctid_attno; + bool isNull; + + ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); + datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + /* shouldn't ever get a null result... 
*/ + if (isNull) + elog(ERROR, "ctid is NULL"); + + state->ctid = (ItemPointer) DatumGetPointer(datum); + } + } /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { HeapTuple htup_old, htup_new; - Relation child_rel = rri_holder->result_rel_info->ri_RelationDesc; + Relation child_rel = resultRelInfo->ri_RelationDesc; htup_old = ExecMaterializeSlot(slot); htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); diff --git a/src/partition_update.c b/src/partition_update.c index fab7f6f2..b95b7c43 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -77,7 +77,7 @@ make_partition_update(Plan *subplan, /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, - returning_list); + returning_list, true); cscan->custom_plans = list_make1(pfilter); cscan->scan.plan.targetlist = pfilter->targetlist; @@ -133,13 +133,14 @@ partition_update_exec(CustomScanState *node) ItemPointerData tuple_ctid; JunkFilter *junkfilter; EPQState epqstate; - AttrNumber ctid_attno; HeapTupleData oldtupdata; HeapTuple oldtuple; PartitionFilterState *child_state = (PartitionFilterState *) child_ps; EState *estate = node->ss.ps.state; + Assert(child_state->keep_ctid); + resultRelInfo = estate->es_result_relation_info; junkfilter = resultRelInfo->ri_junkFilter; Assert(junkfilter != NULL); @@ -148,19 +149,9 @@ partition_update_exec(CustomScanState *node) oldtuple = NULL; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) + if (relkind == RELKIND_RELATION && child_state->ctid != NULL) { - /* - * extract the 'ctid' junk attribute. - */ - Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); - ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); - datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - tupleid = (ItemPointer) DatumGetPointer(datum); + tupleid = child_state->ctid; tuple_ctid = *tupleid; /* be sure we don't free * ctid!! 
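	 * (child_state->ctid points into memory owned by the child node's slot, so the ItemPointerData is copied by value here)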
*/ tupleid = &tuple_ctid; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d4558c4b..5522dfaa 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -403,7 +403,8 @@ partition_filter_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, modify_table->onConflictAction, - returning_list); + returning_list, + false); } } } From 595617d6be8f23d345a94cfcd10d3d2797b22a9c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 27 Apr 2017 18:55:07 +0300 Subject: [PATCH 012/528] Fix update node when columns have different order --- src/hooks.c | 33 +++++++++++++++++++ src/include/hooks.h | 3 ++ src/include/partition_filter.h | 14 +++++--- src/include/partition_update.h | 1 + src/partition_filter.c | 57 ++++++++++++++++++++++++++------- src/partition_update.c | 31 ++++++++++-------- src/pg_pathman.c | 1 + src/planner_tree_modification.c | 2 +- src/utility_stmt_hooking.c | 3 +- src/utils.c | 8 ++--- 10 files changed, 116 insertions(+), 37 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 00e8ff37..ac2674e5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -16,6 +16,7 @@ #include "hooks.h" #include "init.h" #include "partition_filter.h" +#include "partition_update.h" #include "pathman_workers.h" #include "planner_tree_modification.h" #include "runtimeappend.h" @@ -766,3 +767,35 @@ pathman_process_utility_hook(Node *parsetree, context, params, dest, completionTag); } + + +void +pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, + uint64 count) +{ + PlanState *state = (PlanState *) queryDesc->planstate; + + if (IsA(state, ModifyTableState)) + { + int i; + ModifyTableState *mt_state = (ModifyTableState *) state; + + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *subplanstate = (CustomScanState *) mt_state->mt_plans[i]; + + if (IsA(subplanstate, CustomScanState)) + { + if (strcmp(subplanstate->methods->CustomName, "PrepareInsert") == 0) + { + PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; + cstate->parent_state = mt_state; + cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; + mt_state->resultRelInfo->ri_junkFilter = NULL; + } + } + } + } + + standard_ExecutorRun(queryDesc, direction, count); +} diff --git a/src/include/hooks.h b/src/include/hooks.h index 95400fe2..15fa9906 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -13,6 +13,7 @@ #include "postgres.h" +#include "executor/executor.h" #include "optimizer/planner.h" #include "optimizer/paths.h" #include "parser/analyze.h" @@ -60,5 +61,7 @@ void pathman_process_utility_hook(Node *parsetree, DestReceiver *dest, char *completionTag); +void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, + uint64 count); #endif /* PATHMAN_HOOKS_H */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index c6792451..ae73d589 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -39,6 +39,8 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ + JunkFilter *orig_junkFilter; /* we keep original JunkFilter from + ResultRelInfo here */ } ResultRelInfoHolder; @@ -94,9 +96,10 @@ typedef struct bool warning_triggered; /* warning message counter */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ItemPointer ctid; /* ctid of rubuilt tuple - if there any, or 
NULL */ - bool keep_ctid; /* if false ctid will not filled */ + ItemPointer ctid; /* ctid of scanned tuple + if there is any, or NULL; + filled when command_type == CMD_UPDATE */ + CmdType command_type; ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; @@ -118,7 +121,8 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, bool speculative_inserts, Size table_entry_size, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg); + void *on_new_rri_holder_cb_arg, + CmdType cmd_type); void fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels); @@ -144,7 +148,7 @@ Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, List *returning_list, - bool keep_ctid); + CmdType command_type); Node * partition_filter_create_scan_state(CustomScan *node); diff --git a/src/include/partition_update.h b/src/include/partition_update.h index b9607c5c..fc0c0033 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -29,6 +29,7 @@ typedef struct PartitionUpdateState Oid partitioned_table; List *returning_list; ModifyTableState *parent_state; + JunkFilter *saved_junkFilter; Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 94826129..76d62fe4 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -145,7 +145,8 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, bool speculative_inserts, Size table_entry_size, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg) + void *on_new_rri_holder_cb_arg, + CmdType cmd_type) { HASHCTL *result_rels_table_config = &parts_storage->result_rels_table_config; @@ -168,7 +169,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->callback_arg = on_new_rri_holder_cb_arg; /* Currenly ResultPartsStorage is used only for INSERTs */ - parts_storage->command_type = CMD_INSERT; + parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; /* Partitions must remain locked till transaction's end */ @@ -311,12 +312,42 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); + if (parts_storage->command_type == CMD_UPDATE) + { + /* For UPDATE/DELETE, find the appropriate junk attr now */ + char relkind; + JunkFilter *junkfilter = child_result_rel_info->ri_junkFilter; + + relkind = child_result_rel_info->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) + { + junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "ctid"); + if (!AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + else if (relkind == RELKIND_FOREIGN_TABLE) + { + /* + * When there is an AFTER trigger, there should be a + * wholerow attribute. 
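+			 * If no such attribute is found, jf_junkAttNo remains + * invalid, which is checked again at execution time.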
+ */ + junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "wholerow"); + } + else + elog(ERROR, "wrong type of relation"); + + } + /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; + rri_holder->orig_junkFilter = child_result_rel_info->ri_junkFilter; + + if (parts_storage->command_type == CMD_UPDATE) + child_result_rel_info->ri_junkFilter = NULL; /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); @@ -464,7 +495,7 @@ Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, List *returning_list, - bool keep_ctid) + CmdType command_type) { CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; @@ -498,7 +529,7 @@ make_partition_filter(Plan *subplan, Oid parent_relid, cscan->custom_private = list_make4(makeInteger(parent_relid), makeInteger(conflict_action), returning_list, - makeInteger((int) keep_ctid)); + makeInteger(command_type)); return &cscan->scan.plan; } @@ -519,7 +550,7 @@ partition_filter_create_scan_state(CustomScan *node) state->partitioned_table = intVal(linitial(node->custom_private)); state->on_conflict_action = intVal(lsecond(node->custom_private)); state->returning_list = lthird(node->custom_private); - state->keep_ctid = (bool) intVal(lfourth(node->custom_private)); + state->command_type = (CmdType) intVal(lfourth(node->custom_private)); /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -544,7 +575,8 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) state->on_conflict_action != ONCONFLICT_NONE, ResultPartsStorageStandard, prepare_rri_for_insert, - (void *) state); + (void *) state, + state->command_type); state->warning_triggered = false; } @@ -607,11 +639,12 @@ partition_filter_exec(CustomScanState *node) MemoryContextSwitchTo(old_cxt); ResetExprContext(econtext); - /* Magic: replace parent's ResultRelInfo with ours */ resultRelInfo = rri_holder->result_rel_info; + + /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = resultRelInfo; - if (state->keep_ctid) + if (state->command_type == CMD_UPDATE) { JunkFilter *junkfilter; Datum datum; @@ -622,17 +655,15 @@ partition_filter_exec(CustomScanState *node) * we need this step because if there will be conversion * junk attributes will be removed from slot */ - junkfilter = resultRelInfo->ri_junkFilter; + junkfilter = rri_holder->orig_junkFilter; Assert(junkfilter != NULL); relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { - AttrNumber ctid_attno; bool isNull; - ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); - datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... 
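	 * (for an UPDATE on a plain relation the planner always adds a "ctid" junk attribute to the target list)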
*/ if (isNull) elog(ERROR, "ctid is NULL"); @@ -661,6 +692,8 @@ partition_filter_exec(CustomScanState *node) /* Now replace the original slot */ slot = state->tup_convert_slot; } + else if (rri_holder->orig_junkFilter) + slot = ExecFilterJunk(rri_holder->orig_junkFilter, slot); return slot; } diff --git a/src/partition_update.c b/src/partition_update.c index b95b7c43..7923792d 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -77,7 +77,7 @@ make_partition_update(Plan *subplan, /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, - returning_list, true); + returning_list, CMD_UPDATE); cscan->custom_plans = list_make1(pfilter); cscan->scan.plan.targetlist = pfilter->targetlist; @@ -117,8 +117,13 @@ partition_update_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_update_exec(CustomScanState *node) { - PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - TupleTableSlot *slot; + EState *estate = node->ss.ps.state; + PlanState *child_ps = (PlanState *) linitial(node->custom_ps); + TupleTableSlot *slot; + PartitionUpdateState *state = (PartitionUpdateState *) node; + + /* restore junkfilter in parent node */ + state->parent_state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); @@ -137,26 +142,25 @@ partition_update_exec(CustomScanState *node) HeapTuple oldtuple; PartitionFilterState *child_state = (PartitionFilterState *) child_ps; - EState *estate = node->ss.ps.state; - - Assert(child_state->keep_ctid); - - resultRelInfo = estate->es_result_relation_info; - junkfilter = resultRelInfo->ri_junkFilter; - Assert(junkfilter != NULL); + Assert(child_state->command_type == CMD_UPDATE); EvalPlanQualSetSlot(&epqstate, slot); + resultRelInfo = estate->es_result_relation_info; oldtuple = NULL; + junkfilter = resultRelInfo->ri_junkFilter; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION && child_state->ctid != NULL) + + if (relkind == RELKIND_RELATION) { + Assert(child_state->ctid != NULL); + tupleid = child_state->ctid; tuple_ctid = *tupleid; /* be sure we don't free * ctid!! */ tupleid = &tuple_ctid; } - else if (relkind == RELKIND_FOREIGN_TABLE) + else if (junkfilter != NULL && relkind == RELKIND_FOREIGN_TABLE) { if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) { @@ -173,8 +177,7 @@ partition_update_exec(CustomScanState *node) ItemPointerSetInvalid(&(oldtupdata.t_self)); /* Historically, view triggers see invalid t_tableOid. 
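	 * This matches the corresponding wholerow handling in core's nodeModifyTable.c.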
*/ - oldtupdata.t_tableOid =RelationGetRelid(resultRelInfo->ri_RelationDesc); - + oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); oldtuple = &oldtupdata; } } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7061fe9f..ee0f7066 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -155,6 +155,7 @@ _PG_init(void) planner_hook = pathman_planner_hook; process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; + ExecutorRun_hook = pathman_executor_hook; /* Initialize PgPro-specific subsystems */ init_expand_rte_hook(); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 5522dfaa..f863c863 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -404,7 +404,7 @@ partition_filter_visitor(Plan *plan, void *context) relid, modify_table->onConflictAction, returning_list, - false); + CMD_INSERT); } } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2f0b6fa6..c38c3cb8 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -566,7 +566,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, estate, false, ResultPartsStorageStandard, - prepare_rri_for_copy, NULL); + prepare_rri_for_copy, NULL, + CMD_INSERT); parts_storage.saved_rel_info = parent_result_rel; /* Set up a tuple slot too */ diff --git a/src/utils.c b/src/utils.c index 099f5a74..480f7ed7 100644 --- a/src/utils.c +++ b/src/utils.c @@ -118,20 +118,20 @@ get_pathman_schema(void) SysScanDesc scandesc; HeapTuple tuple; ScanKeyData entry[1]; - Oid ext_schema; + Oid ext_oid; /* It's impossible to fetch pg_pathman's schema now */ if (!IsTransactionState()) return InvalidOid; - ext_schema = get_extension_oid("pg_pathman", true); - if (ext_schema == InvalidOid) + ext_oid = get_extension_oid("pg_pathman", true); + if (ext_oid == InvalidOid) return InvalidOid; /* exit if pg_pathman does not exist */ ScanKeyInit(&entry[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ext_schema)); + ObjectIdGetDatum(ext_oid)); rel = heap_open(ExtensionRelationId, AccessShareLock); scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, From 987d30d674d973567cf0270a4b500f60e115e751 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 28 Apr 2017 12:39:16 +0300 Subject: [PATCH 013/528] Make little changes in code and add more comments --- src/partition_filter.c | 3 +-- src/partition_update.c | 16 +++++++++++----- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 76d62fe4..84c7287f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -314,7 +314,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (parts_storage->command_type == CMD_UPDATE) { - /* For UPDATE/DELETE, find the appropriate junk attr now */ char relkind; JunkFilter *junkfilter = child_result_rel_info->ri_junkFilter; @@ -692,7 +691,7 @@ partition_filter_exec(CustomScanState *node) /* Now replace the original slot */ slot = state->tup_convert_slot; } - else if (rri_holder->orig_junkFilter) + else if (state->command_type == CMD_UPDATE) slot = ExecFilterJunk(rri_holder->orig_junkFilter, slot); return slot; diff --git a/src/partition_update.c b/src/partition_update.c index 7923792d..27d0b300 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -122,7 +122,13 @@ partition_update_exec(CustomScanState 
*node) TupleTableSlot *slot; PartitionUpdateState *state = (PartitionUpdateState *) node; - /* restore junkfilter in parent node */ + /* + * Restore the junkfilter in the base resultRelInfo; we do this + * because the child's ResultRelInfo expects it to exist during + * its initialization. + * Also, we change the junk attribute number in the JunkFilter, + * because it wasn't set in ModifyTable node initialization. + */ state->parent_state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); @@ -136,7 +142,6 @@ partition_update_exec(CustomScanState *node) ResultRelInfo *resultRelInfo; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; - JunkFilter *junkfilter; EPQState epqstate; HeapTupleData oldtupdata; HeapTuple oldtuple; PartitionFilterState *child_state = (PartitionFilterState *) child_ps; @@ -148,7 +153,6 @@ partition_update_exec(CustomScanState *node) resultRelInfo = estate->es_result_relation_info; oldtuple = NULL; - junkfilter = resultRelInfo->ri_junkFilter; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) @@ -160,9 +164,11 @@ partition_update_exec(CustomScanState *node) * ctid!! */ tupleid = &tuple_ctid; } - else if (junkfilter != NULL && relkind == RELKIND_FOREIGN_TABLE) + else if (relkind == RELKIND_FOREIGN_TABLE) { + JunkFilter *junkfilter = resultRelInfo->ri_junkFilter; + + if (junkfilter != NULL && AttributeNumberIsValid(junkfilter->jf_junkAttNo)) { datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, From 462d8d5fe3aa4079b08cbe9535a10830843f2434 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 17:41:38 +0300 Subject: [PATCH 014/528] Fix updates on same child relation --- src/partition_filter.c | 45 +++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index bd3de8f5..c13ca974 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -167,7 +167,6 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; - /* Currenly ResultPartsStorage is used only for INSERTs */ parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; /* Partitions must remain locked till transaction's end */ @@ -244,7 +243,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (!found) { Relation child_rel, - parent_rel = parts_storage->saved_rel_info->ri_RelationDesc; + base_rel = parts_storage->saved_rel_info->ri_RelationDesc; RangeTblEntry *child_rte, *parent_rte; Index child_rte_idx; @@ -267,7 +266,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) CheckValidResultRel(child_rel, parts_storage->command_type); /* Build Var translation list for 'inserted_cols' */ - make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); @@ -348,7 +347,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) child_result_rel_info->ri_junkFilter = NULL; /* Generate tuple transformation map and some other stuff */ - rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); + rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); /* Call on_new_rri_holder_callback() if needed */ if (parts_storage->on_new_rri_holder_callback) @@ -367,7 +366,7 @@ scan_result_parts_storage(Oid partid, 
ResultPartsStorage *parts_storage) /* Build tuple conversion map (e.g. parent has a dropped column) */ TupleConversionMap * -build_part_tuple_map(Relation parent_rel, Relation child_rel) +build_part_tuple_map(Relation base_rel, Relation child_rel) { TupleConversionMap *tuple_map; TupleDesc child_tupdesc, @@ -377,7 +376,7 @@ build_part_tuple_map(Relation parent_rel, Relation child_rel) child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); child_tupdesc->tdtypeid = InvalidOid; - parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(parent_rel)); + parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(base_rel)); parent_tupdesc->tdtypeid = InvalidOid; /* Generate tuple transformation map and some other stuff */ @@ -565,15 +564,26 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - Index varno = 1; Node *expr; MemoryContext old_cxt; PartitionFilterState *state = (PartitionFilterState *) node; const PartRelationInfo *prel; ListCell *lc; + PlanState *child_state; + Index expr_relid = 1; + + child_state = ExecInitNode(state->subplan, estate, eflags); /* It's convenient to store PlanState in 'custom_ps' */ - node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); + node->custom_ps = list_make1(child_state); + if (state->command_type == CMD_UPDATE) + { + Assert(IsA(child_state, SeqScanState)); + expr_relid = ((Scan *) ((ScanState *) child_state)->ps.plan)->scanrelid; + Assert(expr_relid >= 1); + } + else + expr_relid = state->partitioned_table; if (state->expr_state == NULL) { @@ -582,18 +592,21 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) Assert(prel != NULL); /* Change varno in Vars according to range table */ - expr = copyObject(prel->expr); - foreach(lc, estate->es_range_table) + if (expr_relid > 1) { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == state->partitioned_table) + expr = copyObject(prel->expr); + foreach(lc, estate->es_range_table) { - if (varno > 1) - ChangeVarNodes(expr, 1, varno, 0); - break; + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == expr_relid) + { + ChangeVarNodes(expr, 1, expr_relid, 0); + break; + } } - varno += 1; } + else + expr = prel->expr; /* Prepare state for expression execution */ old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); From 29e22771484352e609ebcbfb43ab46ad65f7df39 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 18:12:12 +0300 Subject: [PATCH 015/528] Try to fix clang warning --- src/partition_update.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/partition_update.c b/src/partition_update.c index 17b78b38..828cc6fc 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -142,7 +142,7 @@ partition_update_exec(CustomScanState *node) bool isNull; char relkind; ResultRelInfo *resultRelInfo; - ItemPointer tupleid = NULL; + ItemPointer tupleid; ItemPointerData tuple_ctid; EPQState epqstate; HeapTupleData oldtupdata; @@ -188,6 +188,8 @@ partition_update_exec(CustomScanState *node) oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); oldtuple = &oldtupdata; } + + tupleid = NULL; } else elog(ERROR, "PartitionUpdate supports only relations and foreign tables"); @@ -325,6 +327,7 @@ ldelete:; hufd.xmax); if (!TupIsNull(epqslot)) { + Assert(tupleid != NULL); *tupleid = hufd.ctid; goto ldelete; } From 7d18aa49048606a0ab76406ad789fd43eab5c4b2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 18:25:04 +0300 
Subject: [PATCH 016/528] Require tupleid in basic delete function --- src/partition_update.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/partition_update.c b/src/partition_update.c index 828cc6fc..bafa8a68 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -285,7 +285,7 @@ ExecDeleteInternal(ItemPointer tupleid, /* we don't need slot anymore */ ExecDropSingleTupleTableSlot(slot); } - else + else if (tupleid != NULL) { /* delete the tuple */ ldelete:; @@ -340,6 +340,8 @@ ldelete:; return NULL; } } + else + elog(ERROR, "tupleid should be specified for deletion"); /* AFTER ROW DELETE Triggers */ ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple); From 73691513d8bff3996210314f749cde093f4e08a2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 3 May 2017 16:56:02 +0300 Subject: [PATCH 017/528] Fix tests and tuple conversion for inverted or dropped columns --- Makefile | 6 +- expected/pathman_update_node.out | 313 ++++++++++++++++++++----------- sql/pathman_update_node.sql | 105 ++++++----- src/hooks.c | 24 ++- src/include/hooks.h | 1 + src/include/partition_update.h | 3 +- src/include/relation_info.h | 6 +- src/include/utils.h | 1 - src/partition_filter.c | 59 ++++-- src/partition_update.c | 11 +- src/pg_pathman.c | 1 + src/pl_funcs.c | 47 +---- src/relation_info.c | 45 +++++ src/utils.c | 1 + 14 files changed, 381 insertions(+), 242 deletions(-) diff --git a/Makefile b/Makefile index 793e1579..bb4ff894 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o src/partition_update.o $(WIN32RES) + src/compat/rowmarks_fix.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include @@ -41,10 +41,10 @@ REGRESS = pathman_basic \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_update_trigger \ - pathman_update_node \ pathman_updates \ pathman_utility_stmt \ - pathman_expressions + pathman_expressions \ + pathman_update_node EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index e15d04f9..0867c58c 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -1,279 +1,370 @@ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; +CREATE SCHEMA test_update_node; SET pg_pathman.enable_partitionupdate=on; /* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); NOTICE: sequence "test_range_seq" does not exist, skipping create_range_partitions ------------------------- 10 (1 row) +/* Moving from 2nd to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + QUERY PLAN 
+------------------------------------------------------------------- + Insert on test_range_2 + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(7 rows) + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + QUERY PLAN +------------------------------------------------------------------- + Insert on test_range_2 + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(7 rows) + +/* Scan all partitions */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15'; + QUERY PLAN +---------------------------------------------------- + Insert on test_range + Insert on test_range + Insert on test_range_1 + Insert on test_range_2 + Insert on test_range_3 + Insert on test_range_4 + Insert on test_range_5 + Insert on test_range_6 + Insert on test_range_7 + Insert on test_range_8 + Insert on test_range_9 + Insert on test_range_10 + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_1 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_2 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_3 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_4 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_5 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_6 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_7 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_8 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_9 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_10 + Filter: (comment = '15'::text) +(56 rows) + /* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; +UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; /* Check values #1 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val < 10 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_1 | 5 | 1 - test_update_trigger.test_range_1 | 5 | 10 - test_update_trigger.test_range_1 | 5 | 2 - test_update_trigger.test_range_1 | 5 | 3 - test_update_trigger.test_range_1 | 5 | 4 - test_update_trigger.test_range_1 | 5 | 5 - test_update_trigger.test_range_1 | 5 | 6 - test_update_trigger.test_range_1 | 5 | 7 - test_update_trigger.test_range_1 | 5 | 8 - 
test_update_trigger.test_range_1 | 5 | 9 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_1 | 5 | 1 + test_update_node.test_range_1 | 5 | 10 + test_update_node.test_range_1 | 5 | 2 + test_update_node.test_range_1 | 5 | 3 + test_update_node.test_range_1 | 5 | 4 + test_update_node.test_range_1 | 5 | 5 + test_update_node.test_range_1 | 5 | 6 + test_update_node.test_range_1 | 5 | 7 + test_update_node.test_range_1 | 5 | 8 + test_update_node.test_range_1 | 5 | 9 (10 rows) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +UPDATE test_update_node.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; /* Check values #2 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val > 20 AND val <= 30 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_3 | 21 | 11 - test_update_trigger.test_range_3 | 22 | 12 - test_update_trigger.test_range_3 | 23 | 13 - test_update_trigger.test_range_3 | 24 | 14 - test_update_trigger.test_range_3 | 25 | 15 - test_update_trigger.test_range_3 | 26 | 16 - test_update_trigger.test_range_3 | 27 | 17 - test_update_trigger.test_range_3 | 28 | 18 - test_update_trigger.test_range_3 | 29 | 19 - test_update_trigger.test_range_3 | 30 | 20 - test_update_trigger.test_range_3 | 21 | 21 - test_update_trigger.test_range_3 | 22 | 22 - test_update_trigger.test_range_3 | 23 | 23 - test_update_trigger.test_range_3 | 24 | 24 - test_update_trigger.test_range_3 | 25 | 25 - test_update_trigger.test_range_3 | 26 | 26 - test_update_trigger.test_range_3 | 27 | 27 - test_update_trigger.test_range_3 | 28 | 28 - test_update_trigger.test_range_3 | 29 | 29 - test_update_trigger.test_range_3 | 30 | 30 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_3 | 21 | 11 + test_update_node.test_range_3 | 22 | 12 + test_update_node.test_range_3 | 23 | 13 + test_update_node.test_range_3 | 24 | 14 + test_update_node.test_range_3 | 25 | 15 + test_update_node.test_range_3 | 26 | 16 + test_update_node.test_range_3 | 27 | 17 + test_update_node.test_range_3 | 28 | 18 + test_update_node.test_range_3 | 29 | 19 + test_update_node.test_range_3 | 30 | 20 + test_update_node.test_range_3 | 21 | 21 + test_update_node.test_range_3 | 22 | 22 + test_update_node.test_range_3 | 23 | 23 + test_update_node.test_range_3 | 24 | 24 + test_update_node.test_range_3 | 25 | 25 + test_update_node.test_range_3 | 26 | 26 + test_update_node.test_range_3 | 27 | 27 + test_update_node.test_range_3 | 28 | 28 + test_update_node.test_range_3 | 29 | 29 + test_update_node.test_range_3 | 30 | 30 (20 rows) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; +UPDATE test_update_node.test_range SET val = 90 WHERE val = 80; /* Check values #3 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 90 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_9 | 90 | 80 - 
test_update_trigger.test_range_9 | 90 | 90 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_9 | 90 | 80 + test_update_node.test_range_9 | 90 | 90 (2 rows) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; /* Check values #4 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = -1 ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_11 | -1 | 50 + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_11 | -1 | 50 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; +UPDATE test_update_node.test_range SET comment = 'test!' WHERE val = 100; /* Check values #5 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 100 ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_10 | 100 | test! + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_10 | 100 | test! (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; ERROR: cannot spawn a partition /* Check values #6 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 70 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 70 | 70 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 70 | 70 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 row) /* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; /* Check values #7 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 65 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 65 | 65 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 65 | 65 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 row) /* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 
'test_update_trigger.test_range_inv', +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', 101::NUMERIC, 111::NUMERIC); attach_range_partition ------------------------------------ - test_update_trigger.test_range_inv + test_update_node.test_range_inv (1 row) -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; /* Check values #8 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 105 ORDER BY comment; tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_inv | 105 | 60 + test_update_node.test_range_inv | 105 | 60 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 row) /* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); append_range_partition ----------------------------------- - test_update_trigger.test_range_12 + test_update_node.test_range_12 (1 row) -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; /* Check values #9 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 115; tableoid | val -----------------------------------+----- - test_update_trigger.test_range_12 | 115 + test_update_node.test_range_12 | 115 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 row) /* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); create_hash_partitions ------------------------ 3 (1 row) /* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; +UPDATE test_update_node.test_hash SET val = 1; /* Check values #1 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 1 ORDER BY comment; tableoid | val | comment ---------------------------------+-----+--------- - test_update_trigger.test_hash_2 | 1 | 1 - test_update_trigger.test_hash_2 | 1 | 10 - test_update_trigger.test_hash_2 | 1 | 2 - test_update_trigger.test_hash_2 | 1 | 3 - test_update_trigger.test_hash_2 | 1 | 4 - test_update_trigger.test_hash_2 | 1 | 5 - test_update_trigger.test_hash_2 | 1 | 6 - test_update_trigger.test_hash_2 | 1 | 7 - test_update_trigger.test_hash_2 | 1 | 8 - test_update_trigger.test_hash_2 | 1 | 9 + test_update_node.test_hash_2 | 1 | 1 + test_update_node.test_hash_2 | 1 | 
10 + test_update_node.test_hash_2 | 1 | 2 + test_update_node.test_hash_2 | 1 | 3 + test_update_node.test_hash_2 | 1 | 4 + test_update_node.test_hash_2 | 1 | 5 + test_update_node.test_hash_2 | 1 | 6 + test_update_node.test_hash_2 | 1 | 7 + test_update_node.test_hash_2 | 1 | 8 + test_update_node.test_hash_2 | 1 | 9 (10 rows) -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; count ------- 10 (1 row) /* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; +UPDATE test_update_node.test_hash SET val = 3 WHERE val = 2; /* Check values #2 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 3 ORDER BY comment; tableoid | val | comment ----------+-----+--------- (0 rows) -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; count ------- 10 (1 row) -DROP SCHEMA test_update_trigger CASCADE; +DROP SCHEMA test_update_node CASCADE; NOTICE: drop cascades to 18 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index c3cc8d4d..75fc6c64 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -2,161 +2,172 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; +CREATE SCHEMA test_update_node; SET pg_pathman.enable_partitionupdate=on; /* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); +/* Moving from 2nd to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + +/* Scan all partitions */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15'; /* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; +UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; /* Check values #1 */ SELECT
tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 90 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; /* Check values #4 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = -1 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; +UPDATE test_update_node.test_range SET comment = 'test!' WHERE val = 100; /* Check values #5 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 100 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; /* Check values #6 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 70 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; /* Check values #7 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 65 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', 101::NUMERIC, 111::NUMERIC); -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; /* Check values #8 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 105 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; /* Check values #9 */ SELECT tableoid::REGCLASS, * -FROM 
test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 115; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); /* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; +UPDATE test_update_node.test_hash SET val = 1; /* Check values #1 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 1 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; /* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; +UPDATE test_update_node.test_hash SET val = 3 WHERE val = 2; /* Check values #2 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 3 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; -DROP SCHEMA test_update_trigger CASCADE; +DROP SCHEMA test_update_node CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index e6a4887d..7e6118b7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -65,6 +65,7 @@ planner_hook_type planner_hook_next = NULL; post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; shmem_startup_hook_type shmem_startup_hook_next = NULL; ProcessUtility_hook_type process_utility_hook_next = NULL; +ExecutorRun_hook_type executor_run_hook_next = NULL; /* Take care of joins */ @@ -856,18 +857,23 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, { CustomScanState *subplanstate = (CustomScanState *) mt_state->mt_plans[i]; - if (IsA(subplanstate, CustomScanState)) + if (!IsA(subplanstate, CustomScanState)) + continue; + + if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) { - if (strcmp(subplanstate->methods->CustomName, "PrepareInsert") == 0) - { - PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; - cstate->parent_state = mt_state; - cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; - mt_state->resultRelInfo->ri_junkFilter = NULL; - } + PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; + cstate->parent_state = mt_state; + cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; + mt_state->resultRelInfo->ri_junkFilter = NULL; } } } - standard_ExecutorRun(queryDesc, direction, count); + /* Call hooks set by other extensions if needed */ + if (executor_run_hook_next) + executor_run_hook_next(queryDesc, direction, count); + /* Else call internal implementation */ + else + standard_ExecutorRun(queryDesc, direction, count); } diff --git a/src/include/hooks.h b/src/include/hooks.h index 15fa9906..b93b4ba8 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -27,6 +27,7 @@ extern planner_hook_type planner_hook_next; extern post_parse_analyze_hook_type post_parse_analyze_hook_next; extern shmem_startup_hook_type shmem_startup_hook_next; extern 
ProcessUtility_hook_type process_utility_hook_next; +extern ExecutorRun_hook_type executor_run_hook_next; void pathman_join_pathlist_hook(PlannerInfo *root, diff --git a/src/include/partition_update.h b/src/include/partition_update.h index fc0c0033..ea73bfed 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -33,7 +33,8 @@ typedef struct PartitionUpdateState Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; -extern bool pg_pathman_enable_partition_update; +extern bool pg_pathman_enable_partition_update; +extern const char *UPDATE_NODE_DESCRIPTION; extern CustomScanMethods partition_update_plan_methods; extern CustomExecMethods partition_update_exec_methods; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index d5e81b28..0dae9458 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -24,6 +24,7 @@ #include "storage/lock.h" #include "utils/datum.h" #include "utils/lsyscache.h" +#include "utils/relcache.h" /* Range bound */ @@ -370,6 +371,9 @@ extern bool pg_pathman_enable_bounds_cache; void init_relation_info_static_data(void); +AttrNumber * build_attributes_map(const PartRelationInfo *prel, + Relation child_rel, + int *map_length); -#endif /* RELATION_INFO_H */ +#endif /* RELATION_INFO_H */ diff --git a/src/include/utils.h b/src/include/utils.h index b54e4e0f..3e5c65a8 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -61,5 +61,4 @@ Datum extract_binary_interval_from_text(Datum interval_text, char ** deconstruct_text_array(Datum array, int *array_size); RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); - #endif /* PATHMAN_UTILS_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index c13ca974..9daf8251 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -570,20 +570,31 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) const PartRelationInfo *prel; ListCell *lc; PlanState *child_state; - Index expr_relid = 1; + Index expr_varno = 1; child_state = ExecInitNode(state->subplan, estate, eflags); /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(child_state); + if (state->command_type == CMD_UPDATE) + expr_varno = ((Scan *) child_state->plan)->scanrelid; + else { - Assert(IsA(child_state, SeqScanState)); - expr_relid = ((Scan *) ((ScanState *) child_state)->ps.plan)->scanrelid; - Assert(expr_relid >= 1); + Index varno = 1; + + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == state->partitioned_table) + break; + varno++; + } + + expr_varno = varno; + Assert(expr_varno <= list_length(estate->es_range_table)); } - else - expr_relid = state->partitioned_table; + if (state->expr_state == NULL) { @@ -591,23 +602,35 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) prel = get_pathman_relation_info(state->partitioned_table); Assert(prel != NULL); - /* Change varno in Vars according to range table */ - if (expr_relid > 1) + /* Change varno in expression Vars according to range table */ + Assert(expr_varno >= 1); + if (expr_varno > 1) { expr = copyObject(prel->expr); - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == expr_relid) - { - ChangeVarNodes(expr, 1, expr_relid, 0); - break; - } - } + ChangeVarNodes(expr, 1, expr_varno, 0); } else expr = prel->expr; + /* + * Also in updates we would operate with child relation, but + * expression 
expects varattnos like in base relation, so we map + * parent varattnos to child varattnos + */ + if (state->command_type == CMD_UPDATE) + { + int natts; + bool found_whole_row; + AttrNumber *attr_map; + Oid child_relid = getrelid(expr_varno, estate->es_range_table); + Relation child_rel = heap_open(child_relid, NoLock); + + attr_map = build_attributes_map(prel, child_rel, &natts); + expr = map_variable_attnos(expr, expr_varno, 0, attr_map, natts, + &found_whole_row); + heap_close(child_rel, NoLock); + } + /* Prepare state for expression execution */ old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); state->expr_state = ExecInitExpr((Expr *) expr, NULL); @@ -704,7 +727,7 @@ partition_filter_exec(CustomScanState *node) /* * extract `ctid` junk attribute and save it in state, * we need this step because if there will be conversion - * junk attributes will be removed from slot + * then junk attributes will be removed from slot */ junkfilter = rri_holder->orig_junkFilter; Assert(junkfilter != NULL); diff --git a/src/partition_update.c b/src/partition_update.c index bafa8a68..66fdde4b 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -22,6 +22,7 @@ #include "utils/guc.h" #include "utils/rel.h" +const char *UPDATE_NODE_DESCRIPTION = "PrepareInsert"; bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; @@ -36,10 +37,10 @@ static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, void init_partition_update_static_data(void) { - partition_update_plan_methods.CustomName = "PrepareInsert"; + partition_update_plan_methods.CustomName = UPDATE_NODE_DESCRIPTION; partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state; - partition_update_exec_methods.CustomName = "PrepareInsert"; + partition_update_exec_methods.CustomName = UPDATE_NODE_DESCRIPTION; partition_update_exec_methods.BeginCustomScan = partition_update_begin; partition_update_exec_methods.ExecCustomScan = partition_update_exec; partition_update_exec_methods.EndCustomScan = partition_update_end; @@ -278,9 +279,9 @@ ExecDeleteInternal(ItemPointer tupleid, */ ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, - resultRelInfo, - slot, - planSlot); + resultRelInfo, + slot, + planSlot); /* we don't need slot anymore */ ExecDropSingleTupleTableSlot(slot); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a6aa2c73..2d44f55b 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -165,6 +165,7 @@ _PG_init(void) planner_hook = pathman_planner_hook; process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; + executor_run_hook_next = ExecutorRun_hook; ExecutorRun_hook = pathman_executor_hook; /* Initialize PgPro-specific subsystems */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 41297f02..19d16432 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -100,9 +100,6 @@ typedef struct } show_cache_stats_cxt; -static AttrNumber *pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, - Relation child_rel); - static ExprState *pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, Relation source_rel, HeapTuple new_tuple, @@ -1258,48 +1255,6 @@ replace_vars_with_consts(Node *node, struct replace_vars_cxt *ctx) return expression_tree_mutator(node, replace_vars_with_consts, (void *) ctx); } -/* - * Get attributes map between parent and child relation. 
- * This is simplified version of functions that return TupleConversionMap. - * And it should be faster if expression uses not all fields from relation. - */ -static AttrNumber * -pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, - Relation child_rel) -{ - AttrNumber i = -1; - Oid parent_relid = PrelParentRelid(prel); - TupleDesc child_descr = RelationGetDescr(child_rel); - int natts = child_descr->natts; - AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); - - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) - { - int j; - AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(parent_relid, attnum); - - for (j = 0; j < natts; j++) - { - Form_pg_attribute att = child_descr->attrs[j]; - - if (att->attisdropped) - continue; /* attrMap[attnum - 1] is already 0 */ - - if (strcmp(NameStr(att->attname), attname) == 0) - { - result[attnum - 1] = (AttrNumber) (j + 1); - break; - } - } - - if (result[attnum - 1] == 0) - elog(ERROR, "Couldn't find '%s' column in child relation", attname); - } - - return result; -} - static ExprState * pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, Relation source_rel, @@ -1311,7 +1266,7 @@ pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, ExprState *expr_state; ctx.new_tuple = new_tuple; - ctx.attributes_map = pathman_update_trigger_build_attr_map(prel, source_rel); + ctx.attributes_map = build_attributes_map(prel, source_rel, NULL); ctx.tuple_desc = RelationGetDescr(source_rel); expr = replace_vars_with_consts(prel->expr, &ctx); diff --git a/src/relation_info.c b/src/relation_info.c index e5f25e38..d69f41ae 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1348,3 +1348,48 @@ shout_if_prel_is_invalid(const Oid parent_oid, expected_str); } } + +/* + * Get attributes map between parent and child relation. + * This is simplified version of functions that return TupleConversionMap. + * And it should be faster if expression uses not all fields from relation. 
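+ *
+ * Minimal usage sketch (a hypothetical caller; assumes 'prel' comes from
+ * get_pathman_relation_info() and 'child_rel' is an opened partition):
+ *
+ *		int			map_len;
+ *		AttrNumber *map = build_attributes_map(prel, child_rel, &map_len);
+ *
+ * For each parent attribute used by the partitioning expression,
+ * map[parent_attnum - 1] holds the matching child attnum; the remaining
+ * slots stay zero.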
+ */ +AttrNumber * +build_attributes_map(const PartRelationInfo *prel, Relation child_rel, + int *map_length) +{ + AttrNumber i = -1; + Oid parent_relid = PrelParentRelid(prel); + TupleDesc child_descr = RelationGetDescr(child_rel); + int natts = child_descr->natts; + AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); + + if (map_length != NULL) + *map_length = natts; + + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + int j; + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname(parent_relid, attnum); + + for (j = 0; j < natts; j++) + { + Form_pg_attribute att = child_descr->attrs[j]; + + if (att->attisdropped) + continue; /* attrMap[attnum - 1] is already 0 */ + + if (strcmp(NameStr(att->attname), attname) == 0) + { + result[attnum - 1] = (AttrNumber) (j + 1); + break; + } + } + + if (result[attnum - 1] == 0) + elog(ERROR, "Couldn't find '%s' column in child relation", attname); + } + + return result; +} diff --git a/src/utils.c b/src/utils.c index a6b05189..f1576b9a 100644 --- a/src/utils.c +++ b/src/utils.c @@ -575,3 +575,4 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) return rangevars; } + From c4c530c6e13b8e62b98a050110734537fdcb0a36 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 3 May 2017 17:29:39 +0300 Subject: [PATCH 018/528] Fix tests --- Makefile | 2 +- expected/pathman_expressions.out | 3 +++ expected/pathman_update_node.out | 34 ++++++++++++++++---------------- sql/pathman_expressions.sql | 3 +++ src/include/partition_update.h | 3 ++- src/partition_update.c | 3 +-- 6 files changed, 27 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index bb4ff894..eb4bda02 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o $(WIN32RES) + src/compat/rowmarks_fix.o src/partition_update.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index b462bf20..ee56306c 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -152,3 +152,6 @@ SELECT COUNT(*) FROM test.range_rel_2; 24 (1 row) +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 17 other objects +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 0867c58c..2cd7688a 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -109,7 +109,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val < 10 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_1 | 5 | 1 test_update_node.test_range_1 | 5 | 10 @@ -136,7 +136,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val > 20 AND val <= 30 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_3 | 21 | 11 test_update_node.test_range_3 | 22 | 12 @@ -173,7 +173,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 90 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment 
-------------------------------+-----+--------- test_update_node.test_range_9 | 90 | 80 test_update_node.test_range_9 | 90 | 90 @@ -192,7 +192,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = -1 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment --------------------------------+-----+--------- test_update_node.test_range_11 | -1 | 50 (1 row) @@ -210,7 +210,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 100 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment --------------------------------+-----+--------- test_update_node.test_range_10 | 100 | test! (1 row) @@ -230,7 +230,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 70 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_7 | 70 | 70 (1 row) @@ -248,7 +248,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 65 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_7 | 65 | 65 (1 row) @@ -264,8 +264,8 @@ CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL) SELECT attach_range_partition('test_update_node.test_range', 'test_update_node.test_range_inv', 101::NUMERIC, 111::NUMERIC); - attach_range_partition ------------------------------------- + attach_range_partition +--------------------------------- test_update_node.test_range_inv (1 row) @@ -276,8 +276,8 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 105 ORDER BY comment; - tableoid | val | comment -------------------------------------+-----+--------- + tableoid | val | comment +---------------------------------+-----+--------- test_update_node.test_range_inv | 105 | 60 (1 row) @@ -290,8 +290,8 @@ SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (dropped column) */ ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; SELECT append_range_partition('test_update_node.test_range'); - append_range_partition ------------------------------------ + append_range_partition +-------------------------------- test_update_node.test_range_12 (1 row) @@ -301,8 +301,8 @@ UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 115; - tableoid | val ------------------------------------+----- + tableoid | val +--------------------------------+----- test_update_node.test_range_12 | 115 (1 row) @@ -328,8 +328,8 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_hash WHERE val = 1 ORDER BY comment; - tableoid | val | comment ----------------------------------+-----+--------- + tableoid | val | comment +------------------------------+-----+--------- test_update_node.test_hash_2 | 1 | 1 test_update_node.test_hash_2 | 1 | 10 test_update_node.test_hash_2 | 1 | 2 diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index bc24e30f..c543548b 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -51,3 +51,6 @@ UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= ' SELECT COUNT(*) FROM test.range_rel; SELECT COUNT(*) FROM test.range_rel_1; SELECT COUNT(*) FROM test.range_rel_2; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; diff --git a/src/include/partition_update.h b/src/include/partition_update.h 
index ea73bfed..7aed09d1 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -22,6 +22,8 @@ #include "nodes/extensible.h" #endif +#define UPDATE_NODE_DESCRIPTION ("PrepareInsert") + typedef struct PartitionUpdateState { CustomScanState css; @@ -34,7 +36,6 @@ typedef struct PartitionUpdateState } PartitionUpdateState; extern bool pg_pathman_enable_partition_update; -extern const char *UPDATE_NODE_DESCRIPTION; extern CustomScanMethods partition_update_plan_methods; extern CustomExecMethods partition_update_exec_methods; diff --git a/src/partition_update.c b/src/partition_update.c index 66fdde4b..9d122d04 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -22,8 +22,7 @@ #include "utils/guc.h" #include "utils/rel.h" -const char *UPDATE_NODE_DESCRIPTION = "PrepareInsert"; -bool pg_pathman_enable_partition_update = true; +bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; From bf4fe65da2706111aefd1f041e5ff5379b1e182d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 3 May 2017 18:59:14 +0300 Subject: [PATCH 019/528] Fix updates, add returning tests --- expected/pathman_update_node.out | 41 ++++++++++++++++++++++++++++++++ sql/pathman_update_node.sql | 11 +++++++++ src/hooks.c | 18 ++++++++++---- src/include/partition_update.h | 2 +- src/partition_update.c | 4 ++-- 5 files changed, 69 insertions(+), 7 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 2cd7688a..7fd01a0e 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -281,6 +281,46 @@ ORDER BY comment; test_update_node.test_range_inv | 105 | 60 (1 row) +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test RETURNING */ +UPDATE test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; + val | comment +-----+--------- + 71 | 41 +(1 row) + +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, comment; + val | comment +-----+--------- + 71 | 71 + 71 | 41 +(2 rows) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; + val | comment +-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 61 | 61 +(1 row) + +/* Just in case, check we don't duplicate anything */ SELECT count(*) FROM test_update_node.test_range; count ------- @@ -306,6 +346,7 @@ WHERE val = 115; test_update_node.test_range_12 | 115 (1 row) +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; SELECT count(*) FROM test_update_node.test_range; count ------- diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index 75fc6c64..5ba660fe 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -120,8 +120,18 @@ FROM test_update_node.test_range WHERE val = 105 ORDER BY comment; +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; SELECT count(*) FROM test_update_node.test_range; +/* Test RETURNING */ +UPDATE test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, 
comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + +/* Just in case, check we don't duplicate anything */ +SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (dropped column) */ ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; @@ -134,6 +144,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 115; +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; SELECT count(*) FROM test_update_node.test_range; diff --git a/src/hooks.c b/src/hooks.c index 7e6118b7..3fdb3abb 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -862,10 +862,20 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) { - PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; - cstate->parent_state = mt_state; - cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; - mt_state->resultRelInfo->ri_junkFilter = NULL; + PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; + + /* Save parent resultRelInfo in PartitionUpdate node */ + cstate->resultRelInfo = mt_state->resultRelInfo + i; + + /* + * We unset junkfilter to disable junk cleaning in + * ExecModifyTable. We don't need junk cleaning because + * there is possible modification of tuple in `partition_filter_exec` + * Same time we need this junkfilter in PartitionFilter + * nodes, so we save it in node. + */ + cstate->saved_junkFilter = cstate->resultRelInfo->ri_junkFilter; + cstate->resultRelInfo->ri_junkFilter = NULL; } } } diff --git a/src/include/partition_update.h b/src/include/partition_update.h index 7aed09d1..84668587 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -30,7 +30,7 @@ typedef struct PartitionUpdateState Oid partitioned_table; List *returning_list; - ModifyTableState *parent_state; + ResultRelInfo *resultRelInfo; JunkFilter *saved_junkFilter; Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; diff --git a/src/partition_update.c b/src/partition_update.c index 9d122d04..aaaa4555 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -128,10 +128,10 @@ partition_update_exec(CustomScanState *node) * Restore junkfilter in base resultRelInfo, * we do it because child's RelResultInfo expects its existence * for proper initialization. 
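	 * (the junkfilter itself was stashed by pathman_executor_hook, which
	 * disables junk cleanup in ExecModifyTable while PartitionFilter
	 * still needs it; see the hooks.c hunk above)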
-	 * Alsowe change junk attribute number in JunkFilter, because
+	 * Also we set jf_junkAttNo there, because
 	 * it wasn't set in ModifyTable node initialization
 	 */
-	state->parent_state->resultRelInfo->ri_junkFilter = state->saved_junkFilter;
+	state->resultRelInfo->ri_junkFilter = state->saved_junkFilter;
 
 	/* execute PartitionFilter child node */
 	slot = ExecProcNode(child_ps);

From 2184b916f8bfc7a23d2ed6f4f32794474758ccfe Mon Sep 17 00:00:00 2001
From: Ildus K
Date: Thu, 4 May 2017 13:52:44 +0300
Subject: [PATCH 020/528] Fix compatibility with 9.5

---
 expected/pathman_update_node.out  | 62 ----------------------
 sql/pathman_update_node.sql       |  3 ---
 src/hooks.c                       |  2 +-
 src/include/hooks.h               |  8 ++-
 tests/python/partitioning_test.py | 87 +++++++++++++++++++++++++++++--
 5 files changed, 90 insertions(+), 72 deletions(-)

diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out
index 7fd01a0e..f4312b2c 100644
--- a/expected/pathman_update_node.out
+++ b/expected/pathman_update_node.out
@@ -40,68 +40,6 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val =
    Index Cond: (val = '15'::numeric)
 (7 rows)
 
-/* Scan all partitions */
-EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15';
-                     QUERY PLAN                      
-----------------------------------------------------
- Insert on test_range
-   Insert on test_range
-   Insert on test_range_1
-   Insert on test_range_2
-   Insert on test_range_3
-   Insert on test_range_4
-   Insert on test_range_5
-   Insert on test_range_6
-   Insert on test_range_7
-   Insert on test_range_8
-   Insert on test_range_9
-   Insert on test_range_10
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_1
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_2
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_3
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_4
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_5
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_6
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_7
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_8
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_9
-                     Filter: (comment = '15'::text)
-   ->  Custom Scan (PrepareInsert)
-         ->  Custom Scan (PartitionFilter)
-               ->  Seq Scan on test_range_10
-                     Filter: (comment = '15'::text)
-(56 rows)
-
 /* Update values in 1st partition (rows remain there) */
 UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10;
 /* Check values #1 */
diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql
index 5ba660fe..754dffc2 100644
--- a/sql/pathman_update_node.sql
+++ b/sql/pathman_update_node.sql
@@ -18,9 +18,6 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1
 /* Keep same partition */
 EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15;
-/* Scan all partitions */
-EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15';
-
 /* Update values in 1st partition (rows remain there) */
 UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10;
 
diff --git a/src/hooks.c b/src/hooks.c
index 3fdb3abb..8fb7c954 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -844,7 +844,7 @@ pathman_process_utility_hook(Node *parsetree,
 
 void
 pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction,
-					  uint64 count)
+					  ExecutorRun_CountArgType count)
 {
 	PlanState	   *state = (PlanState *) queryDesc->planstate;
 
diff --git a/src/include/hooks.h b/src/include/hooks.h
index b93b4ba8..fec0a8c0 100644
--- a/src/include/hooks.h
+++ b/src/include/hooks.h
@@ -62,7 +62,13 @@ void pathman_process_utility_hook(Node *parsetree,
 										  DestReceiver *dest,
 										  char *completionTag);
 
+#if PG_VERSION_NUM >= 90600
+typedef uint64 ExecutorRun_CountArgType;
+#else
+typedef long ExecutorRun_CountArgType;
+#endif
+
 void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction,
-						   uint64 count);
+						   ExecutorRun_CountArgType count);
 
 #endif /* PATHMAN_HOOKS_H */
diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py
index cda00c62..41b8d5c4 100755
--- a/tests/python/partitioning_test.py
+++ b/tests/python/partitioning_test.py
@@ -15,16 +15,18 @@
 import re
 import subprocess
 import threading
+import json
 
 from testgres import get_new_node, stop_all
 
 
 # Helper function for json equality
-def ordered(obj):
+def ordered(obj, skip_keys=None):
     if isinstance(obj, dict):
-        return sorted((k, ordered(v)) for k, v in obj.items())
+        return sorted((k, ordered(v, skip_keys=skip_keys)) for k, v in obj.items()
+                      if skip_keys is None or (skip_keys and k not in skip_keys))
     if isinstance(obj, list):
-        return sorted(ordered(x) for x in obj)
+        return sorted(ordered(x, skip_keys=skip_keys) for x in obj)
     else:
         return obj
 
@@ -470,8 +472,6 @@ def test_foreign_table(self):
     def test_parallel_nodes(self):
         """Test parallel queries under partitions"""
 
-        import json
-
         # Init and start postgres instance with preload pg_pathman module
         node = get_new_node('test')
         node.init()
@@ -990,6 +990,83 @@ def test_concurrent_detach(self):
         node.cleanup()
         FNULL.close()
 
+    def test_update_node_plan1(self):
+        ''' Test scan on all partitions when using update node.
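+        Plans are captured as JSON via a plpgsql helper and compared
+        subtree by subtree, skipping version-dependent keys.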
+        We can't use regression tests here because 9.5 and 9.6 give
+        different plans
+        '''
+
+        node = get_new_node('test_update_node')
+        node.init()
+        node.append_conf(
+            'postgresql.conf',
+            """
+            shared_preload_libraries=\'pg_pathman\'
+            pg_pathman.override_copy=false
+            """)
+        node.start()
+
+        # Prepare test database
+        node.psql('postgres', 'CREATE EXTENSION pg_pathman;')
+        node.psql('postgres', 'CREATE SCHEMA test_update_node;')
+        node.psql('postgres', 'CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT)')
+        node.psql('postgres', 'INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i;')
+        node.psql('postgres', "SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10);")
+
+        node.psql('postgres', """
+            create or replace function query_plan(query text) returns jsonb as $$
+            declare
+                plan jsonb;
+            begin
+                execute 'explain (costs off, format json)' || query into plan;
+                return plan;
+            end;
+            $$ language plpgsql;
+        """)
+
+        with node.connect() as con:
+            con.execute("SET pg_pathman.enable_partitionupdate=on")
+
+            test_query = "UPDATE test_update_node.test_range SET val = 14 WHERE comment=''15''"
+            plan = con.execute('SELECT query_plan(\'%s\')' % test_query)[0][0]
+            plan = plan[0]["Plan"]
+
+            self.assertEqual(plan["Node Type"], "ModifyTable")
+            self.assertEqual(plan["Operation"], "Insert")
+            self.assertEqual(plan["Relation Name"], "test_range")
+            self.assertEqual(len(plan["Target Tables"]), 11)
+
+            expected_format = '''
+            {
+                "Plans": [
+                    {
+                        "Plans": [
+                            {
+                                "Filter": "(comment = '15'::text)",
+                                "Node Type": "Seq Scan",
+                                "Relation Name": "test_range%s",
+                                "Parent Relationship": "child"
+                            }
+                        ],
+                        "Node Type": "Custom Scan",
+                        "Parent Relationship": "child",
+                        "Custom Plan Provider": "PartitionFilter"
+                    }
+                ],
+                "Node Type": "Custom Scan",
+                "Parent Relationship": "Member",
+                "Custom Plan Provider": "PrepareInsert"
+            }
+            '''
+            for i, f in enumerate([''] + list(map(str, range(1, 10)))):
+                num = '_' + f if f else ''
+                expected = json.loads(expected_format % num)
+                p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias'])
+                self.assertEqual(p, ordered(expected))
+
+        node.stop()
+        node.cleanup()
+
 
 if __name__ == "__main__":
     unittest.main()

From a1f8149a73e5221fb88a05739fd4dc78981a8478 Mon Sep 17 00:00:00 2001
From: Ildus K
Date: Thu, 4 May 2017 13:56:41 +0300
Subject: [PATCH 021/528] Add proper cleaning in update node test

---
 tests/python/partitioning_test.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py
index 41b8d5c4..26e77037 100755
--- a/tests/python/partitioning_test.py
+++ b/tests/python/partitioning_test.py
@@ -1067,6 +1067,9 @@ def test_update_node_plan1(self):
         node.stop()
         node.cleanup()
 
+        node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;')
+        node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;')
+
 
 if __name__ == "__main__":
     unittest.main()

From 0a3c84a3cc18b930c918c60d817b2eb1debaab28 Mon Sep 17 00:00:00 2001
From: Ildar Musin
Date: Thu, 4 May 2017 13:57:41 +0300
Subject: [PATCH 022/528] multilevel partitioning

---
 src/include/partition_filter.h | 16 ++++--
 src/partition_filter.c         | 90 ++++++++++++++++++++++++++------
 src/pg_pathman.c               | 18 ++++++-
 src/pl_funcs.c                 |  6 +--
 src/utility_stmt_hooking.c     | 24 ++++-----
 5 files changed, 117 insertions(+), 37 deletions(-)

diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h
index cccacf2f..9b0d8391 100644
--- a/src/include/partition_filter.h
+++ 
b/src/include/partition_filter.h @@ -40,6 +40,8 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ + bool has_subpartitions; + ExprState *expr_state; /* if has_subpartitions true */ } ResultRelInfoHolder; @@ -133,11 +135,15 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate); - +// ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, +// const PartRelationInfo *prel, +// ResultPartsStorage *parts_storage, +// EState *estate); +ResultRelInfoHolder * +select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, + const PartRelationInfo *prel, + ResultPartsStorage *parts_storage, + EState *estate); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, diff --git a/src/partition_filter.c b/src/partition_filter.c index ac0e5528..93fad0ae 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -319,6 +319,11 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); + /* Are there subpartitions? */ + rri_holder->has_subpartitions = + (get_pathman_relation_info(partid) != NULL); + rri_holder->expr_state = NULL; + /* Call on_new_rri_holder_callback() if needed */ if (parts_storage->on_new_rri_holder_callback) parts_storage->on_new_rri_holder_callback(parts_storage->estate, @@ -412,7 +417,7 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). 
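 * With the multilevel-partitioning change below, this wrapper also
 * evaluates the partitioning expression itself (via 'expr_state') and,
 * if the chosen partition is subpartitioned, recurses until it reaches
 * a leaf partition.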
*/ ResultRelInfoHolder * -select_partition_for_insert(Datum value, Oid value_type, +select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, const PartRelationInfo *prel, ResultPartsStorage *parts_storage, EState *estate) @@ -422,9 +427,23 @@ select_partition_for_insert(Datum value, Oid value_type, Oid selected_partid = InvalidOid; Oid *parts; int nparts; + TupleTableSlot *tmp_slot; + // const PartRelationInfo *subprel; + bool isnull; + ExprDoneCond itemIsDone; + Datum value; + + /* Execute expression */ + value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); + + if (itemIsDone != ExprSingleResult) + elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); + parts = find_partitions_for_value(value, prel->atttype, prel, &nparts); if (nparts > 1) elog(ERROR, ERR_PART_ATTR_MULTIPLE); @@ -438,9 +457,57 @@ select_partition_for_insert(Datum value, Oid value_type, } else selected_partid = parts[0]; + // subprel = get_pathman_relation_info(state->partitioned_table)) /* Replace parent table with a suitable partition */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); + + /* If partition has subpartitions */ + if (rri_holder->has_subpartitions) + { + const PartRelationInfo *subprel; + + /* Fetch PartRelationInfo for this partitioned relation */ + subprel = get_pathman_relation_info(selected_partid); + Assert(subprel != NULL); + + /* Build an expression state if not yet */ + if (!rri_holder->expr_state) + { + MemoryContext tmp_mcxt; + Node *expr; + Index varno = 1; + ListCell *lc; + + /* Change varno in Vars according to range table */ + expr = copyObject(subprel->expr); + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == selected_partid) + { + if (varno > 1) + ChangeVarNodes(expr, 1, varno, 0); + break; + } + varno += 1; + } + + /* Prepare state for expression execution */ + tmp_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + rri_holder->expr_state = ExecInitExpr((Expr *) expr, NULL); + MemoryContextSwitchTo(tmp_mcxt); + } + + Assert(rri_holder->expr_state != NULL); + + /* Dive in */ + rri_holder = select_partition_for_insert(econtext, rri_holder->expr_state, + subprel, + parts_storage, + estate); + } + MemoryContextSwitchTo(old_mcxt); /* Could not find suitable partition */ @@ -598,9 +665,9 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - bool isnull; - Datum value; - ExprDoneCond itemIsDone; + // bool isnull; + // Datum value; + // ExprDoneCond itemIsDone; TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ @@ -618,22 +685,15 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - value = ExecEvalExpr(state->expr_state, econtext, &isnull, &itemIsDone); - econtext->ecxt_scantuple = tmp_slot; - - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); - - if (itemIsDone != ExprSingleResult) - elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(value, prel->atttype, prel, + rri_holder = select_partition_for_insert(econtext, 
state->expr_state, prel, &state->result_parts, estate); + econtext->ecxt_scantuple = tmp_slot; + /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); ResetExprContext(econtext); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9455b856..7e80aa92 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -249,7 +249,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, child_rte = copyObject(parent_rte); child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - child_rte->inh = false; /* relation has no children */ + // child_rte->inh = false; /* relation has no children */ + child_rte->inh = (child_oid != parent_rte->relid) ? + child_relation->rd_rel->relhassubclass : false; child_rte->requiredPerms = 0; /* perform all checks on parent */ /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ @@ -391,6 +393,17 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, parent_rowmark->isParent = true; } + /* + * TODO: new!!! + */ + if (child_rte->inh) + { + pathman_rel_pathlist_hook(root, + child_rel, + childRTindex, + child_rte); + } + return childRTindex; } @@ -1659,7 +1672,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, set_foreign_pathlist(root, childrel, childRTE); } - else + /* TODO: temporary!!! */ + else if(!childRTE->inh || childrel->pathlist == NIL) { /* childrel->rows should be >= 1 */ set_plain_rel_size(root, childrel, childRTE); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 41297f02..ef06c581 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -612,9 +612,9 @@ is_tuple_convertible(PG_FUNCTION_ARGS) void *map; /* we don't actually need it */ /* Try to build a conversion map */ - map = convert_tuples_by_name_map(RelationGetDescr(rel1), - RelationGetDescr(rel2), - ERR_PART_DESC_CONVERT); + map = convert_tuples_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); /* Now free map */ pfree(map); } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 64d563db..97a33574 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -597,11 +597,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot, *tmp_slot; - ExprDoneCond itemIsDone; - bool skip_tuple, - isnull; + // ExprDoneCond itemIsDone; + bool skip_tuple; + // isnull; Oid tuple_oid = InvalidOid; - Datum value; + // Datum value; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; @@ -641,19 +641,19 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - econtext->ecxt_scantuple = tmp_slot; + // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); + // if (isnull) + // elog(ERROR, ERR_PART_ATTR_NULL); - if (itemIsDone != ExprSingleResult) - elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); + // if (itemIsDone != ExprSingleResult) + // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(value, - prel->atttype, prel, + // rri_holder = select_partition_for_insert(value, + rri_holder = select_partition_for_insert(econtext, expr_state, prel, &parts_storage, estate); + econtext->ecxt_scantuple = tmp_slot; child_result_rel = rri_holder->result_rel_info; estate->es_result_relation_info = child_result_rel; From 
29619a500e87a3f8a3f135bdb2f895f1c53ccb67 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 4 May 2017 14:49:57 +0300 Subject: [PATCH 023/528] clean up --- src/include/compat/debug_compat_features.h | 2 - src/include/partition_filter.h | 13 ++---- src/partition_creation.c | 1 - src/partition_filter.c | 6 --- src/pg_pathman.c | 53 +++++++++++++--------- src/utility_stmt_hooking.c | 11 ----- 6 files changed, 36 insertions(+), 50 deletions(-) diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h index c668d4ce..8968b572 100644 --- a/src/include/compat/debug_compat_features.h +++ b/src/include/compat/debug_compat_features.h @@ -12,8 +12,6 @@ #define ENABLE_PGPRO_PATCHES /* PgPro exclusive features */ -//#define ENABLE_EXPAND_RTE_HOOK -//#define ENABLE_RELATION_TAGS #define ENABLE_PATHMAN_AWARE_COPY_WIN32 /* Hacks for vanilla */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 9b0d8391..e053d2a5 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -135,15 +135,10 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -// ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, -// const PartRelationInfo *prel, -// ResultPartsStorage *parts_storage, -// EState *estate); -ResultRelInfoHolder * -select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate); +ResultRelInfoHolder *select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, + const PartRelationInfo *prel, + ResultPartsStorage *parts_storage, + EState *estate); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, diff --git a/src/partition_creation.c b/src/partition_creation.c index 68d431b7..4bb54b2e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1300,7 +1300,6 @@ build_raw_hash_check_tree(Node *raw_expression, A_Expr *eq_oper = makeNode(A_Expr); FuncCall *part_idx_call = makeNode(FuncCall), *hash_call = makeNode(FuncCall); - //ColumnRef *hashed_column = makeNode(ColumnRef); A_Const *part_idx_c = makeNode(A_Const), *part_count_c = makeNode(A_Const); diff --git a/src/partition_filter.c b/src/partition_filter.c index 93fad0ae..47ad1e88 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -427,8 +427,6 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, Oid selected_partid = InvalidOid; Oid *parts; int nparts; - TupleTableSlot *tmp_slot; - // const PartRelationInfo *subprel; bool isnull; ExprDoneCond itemIsDone; Datum value; @@ -457,7 +455,6 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, } else selected_partid = parts[0]; - // subprel = get_pathman_relation_info(state->partitioned_table)) /* Replace parent table with a suitable partition */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); @@ -665,9 +662,6 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - // bool isnull; - // Datum value; - // ExprDoneCond itemIsDone; TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7e80aa92..e10d1172 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -249,10 +249,14 @@ 
append_child_relation(PlannerInfo *root, Relation parent_relation, child_rte = copyObject(parent_rte); child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - // child_rte->inh = false; /* relation has no children */ + child_rte->requiredPerms = 0; /* perform all checks on parent */ + /* + * If it is the parent relation, then set inh flag to false to prevent + * further recursive unrolling. Else if relation is a child and has subclass + * then we will need to check if there are subpartitions + */ child_rte->inh = (child_oid != parent_rte->relid) ? child_relation->rd_rel->relhassubclass : false; - child_rte->requiredPerms = 0; /* perform all checks on parent */ /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); @@ -394,7 +398,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, } /* - * TODO: new!!! + * Recursively expand child partition if it has subpartitions */ if (child_rte->inh) { @@ -1660,29 +1664,36 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, set_rel_consider_parallel_compat(root, childrel, childRTE); #endif - /* Compute child's access paths & sizes */ - if (childRTE->relkind == RELKIND_FOREIGN_TABLE) + /* + * If inh is True and pathlist is not null then it is a partitioned + * table and we've already filled it, skip it. Otherwise build a + * pathlist for it + */ + if(!childRTE->inh || childrel->pathlist == NIL) { - /* childrel->rows should be >= 1 */ - set_foreign_size(root, childrel, childRTE); + /* Compute child's access paths & sizes */ + if (childRTE->relkind == RELKIND_FOREIGN_TABLE) + { + /* childrel->rows should be >= 1 */ + set_foreign_size(root, childrel, childRTE); - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; - set_foreign_pathlist(root, childrel, childRTE); - } - /* TODO: temporary!!! 
*/ - else if(!childRTE->inh || childrel->pathlist == NIL) - { - /* childrel->rows should be >= 1 */ - set_plain_rel_size(root, childrel, childRTE); + set_foreign_pathlist(root, childrel, childRTE); + } + else + { + /* childrel->rows should be >= 1 */ + set_plain_rel_size(root, childrel, childRTE); - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; - set_plain_rel_pathlist(root, childrel, childRTE); + set_plain_rel_pathlist(root, childrel, childRTE); + } } /* Set cheapest path for child */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 97a33574..b2e46c43 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -597,11 +597,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot, *tmp_slot; - // ExprDoneCond itemIsDone; bool skip_tuple; - // isnull; Oid tuple_oid = InvalidOid; - // Datum value; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; @@ -641,16 +638,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - - // if (isnull) - // elog(ERROR, ERR_PART_ATTR_NULL); - - // if (itemIsDone != ExprSingleResult) - // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - // rri_holder = select_partition_for_insert(value, rri_holder = select_partition_for_insert(econtext, expr_state, prel, &parts_storage, estate); econtext->ecxt_scantuple = tmp_slot; From 055065983966343657a2381481baa350c3e4b330 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 4 May 2017 15:43:42 +0300 Subject: [PATCH 024/528] Fix tests --- tests/python/partitioning_test.py | 46 +++++++++++++++---------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 26e77037..9dc404af 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1037,26 +1037,26 @@ def test_update_node_plan1(self): self.assertEqual(len(plan["Target Tables"]), 11) expected_format = ''' - { - "Plans": [ - { - "Plans": [ - { - "Filter": "(comment = '15'::text)", - "Node Type": "Seq Scan", - "Relation Name": "test_range%s", - "Parent Relationship": "child" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "child", - "Custom Plan Provider": "PartitionFilter" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "Member", - "Custom Plan Provider": "PrepareInsert" - } + { + "Plans": [ + { + "Plans": [ + { + "Filter": "(comment = '15'::text)", + "Node Type": "Seq Scan", + "Relation Name": "test_range%s", + "Parent Relationship": "child" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "child", + "Custom Plan Provider": "PartitionFilter" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "Member", + "Custom Plan Provider": "PrepareInsert" + } ''' for i, f in enumerate([''] + list(map(str, range(1, 10)))): num = '_' + f if f else '' @@ -1064,12 +1064,12 @@ def test_update_node_plan1(self): p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) self.assertEqual(p, ordered(expected)) - node.stop() - node.cleanup() - node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + node.stop() + node.cleanup() + if __name__ == 
"__main__": unittest.main() From 5a12ecc5e6a725be6fedf7de1d921996ab5154ea Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 4 May 2017 17:28:17 +0300 Subject: [PATCH 025/528] make UPDATE and DELETE queries work with multilevel partitioning --- src/planner_tree_modification.c | 182 +++++++++++++++++++------------- 1 file changed, 106 insertions(+), 76 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 4a804101..19b4a34b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -36,6 +36,8 @@ static void partition_filter_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); +static Oid find_deepest_partition(Oid relid, Index idx, Expr *quals); + /* * HACK: We have to mark each Query with a unique @@ -238,14 +240,10 @@ disable_standard_inheritance(Query *parse) static void handle_modification_query(Query *parse) { - const PartRelationInfo *prel; - Node *prel_expr; - List *ranges; RangeTblEntry *rte; - WrapperNode *wrap; - Expr *expr; - WalkerContext context; + Expr *quals; Index result_rel; + Oid child; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -261,101 +259,133 @@ handle_modification_query(Query *parse) /* Exit if it's DELETE FROM ONLY table */ if (!rte->inh) return; - prel = get_pathman_relation_info(rte->relid); - - /* Exit if it's not partitioned */ - if (!prel) return; - - /* Exit if we must include parent */ - if (prel->enable_parent) return; - - /* Parse syntax tree and extract partition ranges */ - ranges = list_make1_irange_full(prel, IR_COMPLETE); - expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - - /* Exit if there's no expr (no use) */ - if (!expr) return; - - /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, result_rel); - - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL, false); - wrap = walk_expr_tree(expr, &context); + quals = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - ranges = irange_list_intersection(ranges, wrap->rangeset); + /* + * Parse syntax tree and extract deepest partition (if there is only one + * satisfying quals) + */ + child = find_deepest_partition(rte->relid, result_rel, quals); /* * If only one partition is affected, * substitute parent table with partition. 
*/ - if (irange_list_length(ranges) == 1) + if (OidIsValid(child)) { - IndexRange irange = linitial_irange(ranges); + Relation child_rel, + parent_rel; - /* Exactly one partition (bounds are equal) */ - if (irange_lower(irange) == irange_upper(irange)) + void *tuple_map; /* we don't need the map itself */ + + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ + + HeapTuple syscache_htup; + char child_relkind; + Oid parent = rte->relid; + + /* Lock 'child' table */ + LockRelationOid(child, lockmode); + + /* Make sure that 'child' exists */ + syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); + if (HeapTupleIsValid(syscache_htup)) { - Oid *children = PrelGetChildrenArray(prel), - child = children[irange_lower(irange)], - parent = rte->relid; + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); - Relation child_rel, - parent_rel; + /* Fetch child's relkind and free cache entry */ + child_relkind = reltup->relkind; + ReleaseSysCache(syscache_htup); + } + else + { + UnlockRelationOid(child, lockmode); + return; /* nothing to do here */ + } - void *tuple_map; /* we don't need the map itself */ + /* Both tables are already locked */ + child_rel = heap_open(child, NoLock); + parent_rel = heap_open(parent, NoLock); - LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ + /* Build a conversion map (may be trivial, i.e. NULL) */ + tuple_map = build_part_tuple_map(parent_rel, child_rel); + if (tuple_map) + free_conversion_map((TupleConversionMap *) tuple_map); - HeapTuple syscache_htup; - char child_relkind; + /* Close relations (should remain locked, though) */ + heap_close(child_rel, NoLock); + heap_close(parent_rel, NoLock); - /* Lock 'child' table */ - LockRelationOid(child, lockmode); + /* Exit if tuple map was NOT trivial */ + if (tuple_map) /* just checking the pointer! */ + return; - /* Make sure that 'child' exists */ - syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); - if (HeapTupleIsValid(syscache_htup)) - { - Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); + /* Update RTE's relid and relkind (for FDW) */ + rte->relid = child; + rte->relkind = child_relkind; - /* Fetch child's relkind and free cache entry */ - child_relkind = reltup->relkind; - ReleaseSysCache(syscache_htup); - } - else - { - UnlockRelationOid(child, lockmode); - return; /* nothing to do here */ - } + /* HACK: unset the 'inh' flag (no children) */ + rte->inh = false; + } +} + +/* + * Find a single deepest subpartition. If there are more than one partitions + * satisfies quals or no such partition at all then return InvalidOid. + */ +static Oid +find_deepest_partition(Oid relid, Index idx, Expr *quals) +{ + const PartRelationInfo *prel; + Node *prel_expr; + WalkerContext context; + List *ranges; + WrapperNode *wrap; + + /* Exit if there's no quals (no use) */ + if (!quals) return InvalidOid; + + prel = get_pathman_relation_info(relid); + + /* Exit if it's not partitioned */ + if (!prel) return InvalidOid; - /* Both tables are already locked */ - child_rel = heap_open(child, NoLock); - parent_rel = heap_open(parent, NoLock); + /* Exit if we must include parent */ + if (prel->enable_parent) return InvalidOid; - /* Build a conversion map (may be trivial, i.e. 
NULL) */ - tuple_map = build_part_tuple_map(parent_rel, child_rel); - if (tuple_map) - free_conversion_map((TupleConversionMap *) tuple_map); + /* Prepare partitioning expression */ + prel_expr = PrelExpressionForRelid(prel, idx); - /* Close relations (should remain locked, though) */ - heap_close(child_rel, NoLock); - heap_close(parent_rel, NoLock); + ranges = list_make1_irange_full(prel, IR_COMPLETE); - /* Exit if tuple map was NOT trivial */ - if (tuple_map) /* just checking the pointer! */ - return; + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel_expr, prel, NULL, false); + wrap = walk_expr_tree(quals, &context); + ranges = irange_list_intersection(ranges, wrap->rangeset); - /* Update RTE's relid and relkind (for FDW) */ - rte->relid = child; - rte->relkind = child_relkind; + if (irange_list_length(ranges) == 1) + { + IndexRange irange = linitial_irange(ranges); + + if (irange_lower(irange) == irange_upper(irange)) + { + Oid *children = PrelGetChildrenArray(prel), + partition = children[irange_lower(irange)], + subpartition; + + /* + * Try to go deeper and see if there is subpartition + */ + subpartition = find_deepest_partition(partition, idx, quals); + if (OidIsValid(subpartition)) + return subpartition; - /* HACK: unset the 'inh' flag (no children) */ - rte->inh = false; + return partition; } } -} + return InvalidOid; +} /* * ------------------------------- From 146061b1d4ca1e2b26b2f140d7f7770ab761ddaf Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 4 May 2017 18:53:04 +0300 Subject: [PATCH 026/528] fixes for UPDATE and DELETE --- src/planner_tree_modification.c | 56 +++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 17 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 19b4a34b..f14593d2 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -27,6 +27,13 @@ #define PARENTHOOD_TAG CppAsString(PARENTHOOD) +typedef enum +{ + FP_FOUND, /* Found partition */ + FP_PLAIN_TABLE, /* Table isn't partitioned by pg_pathman */ + FP_NON_SINGULAR_RESULT /* Multiple or no partitions */ +} FindPartitionResult; + static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse); @@ -36,7 +43,7 @@ static void partition_filter_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); -static Oid find_deepest_partition(Oid relid, Index idx, Expr *quals); +static FindPartitionResult find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition); /* @@ -244,6 +251,7 @@ handle_modification_query(Query *parse) Expr *quals; Index result_rel; Oid child; + FindPartitionResult fp_result; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -265,13 +273,13 @@ handle_modification_query(Query *parse) * Parse syntax tree and extract deepest partition (if there is only one * satisfying quals) */ - child = find_deepest_partition(rte->relid, result_rel, quals); + fp_result = find_deepest_partition(rte->relid, result_rel, quals, &child); /* * If only one partition is affected, * substitute parent table with partition. */ - if (OidIsValid(child)) + if (fp_result == FP_FOUND) { Relation child_rel, parent_rel; @@ -333,8 +341,8 @@ handle_modification_query(Query *parse) * Find a single deepest subpartition. If there are more than one partitions * satisfies quals or no such partition at all then return InvalidOid. 
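 * (the search recurses through subpartitions, so with multilevel
 * partitioning the returned relation is the deepest matching leaf)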
*/ -static Oid -find_deepest_partition(Oid relid, Index idx, Expr *quals) +static FindPartitionResult +find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) { const PartRelationInfo *prel; Node *prel_expr; @@ -342,16 +350,19 @@ find_deepest_partition(Oid relid, Index idx, Expr *quals) List *ranges; WrapperNode *wrap; - /* Exit if there's no quals (no use) */ - if (!quals) return InvalidOid; - prel = get_pathman_relation_info(relid); /* Exit if it's not partitioned */ - if (!prel) return InvalidOid; + if (!prel) + return FP_PLAIN_TABLE; /* Exit if we must include parent */ - if (prel->enable_parent) return InvalidOid; + if (prel->enable_parent) + return FP_NON_SINGULAR_RESULT; + + /* Exit if there's no quals (no use) */ + if (!quals) + return FP_NON_SINGULAR_RESULT; /* Prepare partitioning expression */ prel_expr = PrelExpressionForRelid(prel, idx); @@ -370,21 +381,32 @@ find_deepest_partition(Oid relid, Index idx, Expr *quals) if (irange_lower(irange) == irange_upper(irange)) { Oid *children = PrelGetChildrenArray(prel), - partition = children[irange_lower(irange)], + child = children[irange_lower(irange)], subpartition; + FindPartitionResult result; /* * Try to go deeper and see if there is subpartition */ - subpartition = find_deepest_partition(partition, idx, quals); - if (OidIsValid(subpartition)) - return subpartition; - - return partition; + result = find_deepest_partition(child, + idx, + quals, + &subpartition); + switch(result) + { + case FP_FOUND: + *partition = subpartition; + return FP_FOUND; + case FP_PLAIN_TABLE: + *partition = child; + return FP_FOUND; + case FP_NON_SINGULAR_RESULT: + return FP_NON_SINGULAR_RESULT; + } } } - return InvalidOid; + return FP_NON_SINGULAR_RESULT; } /* From 310d76be751671621b0f22f47d078f566c6c815b Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 5 May 2017 16:33:30 +0300 Subject: [PATCH 027/528] fix update trigger to support multilevel partitioning --- src/partition_filter.c | 1 + src/pl_funcs.c | 218 ++++++++++++++++++++++++++++++++--------- 2 files changed, 171 insertions(+), 48 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 47ad1e88..bca97afe 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -456,6 +456,7 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, else selected_partid = parts[0]; /* Replace parent table with a suitable partition */ + /* TODO: write a correct comment */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ef06c581..b21dad7e 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -113,6 +113,10 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple); +static Oid find_target_partition(Relation source_rel, HeapTuple tuple); +static Oid find_topmost_parent(Oid partition); +static Oid find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple); + /* * ------------------------ @@ -1086,26 +1090,26 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) Relation source_rel; - Oid parent_relid, - source_relid, + // Oid parent_relid, + Oid source_relid, target_relid; HeapTuple old_tuple, new_tuple; - Datum value; - Oid value_type; - bool isnull; - ExprDoneCond itemIsDone; + // Datum value; + // Oid value_type; + // bool isnull; + // ExprDoneCond itemIsDone; - Oid *parts; - int nparts; + // Oid *parts; + // int nparts; - ExprContext *econtext; - 
ExprState *expr_state; - MemoryContext old_mcxt; - PartParentSearch parent_search; - const PartRelationInfo *prel; + // ExprContext *econtext; + // ExprState *expr_state; + // MemoryContext old_mcxt; + // PartParentSearch parent_search; + // const PartRelationInfo *prel; /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) @@ -1128,22 +1132,161 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) old_tuple = trigdata->tg_trigtuple; new_tuple = trigdata->tg_newtuple; - /* Find parent relation and partitioning info */ - parent_relid = get_parent_of_partition(source_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + // /* Find parent relation and partitioning info */ + // parent_relid = get_parent_of_partition(source_relid, &parent_search); + // if (parent_search != PPS_ENTRY_PART_PARENT) + // elog(ERROR, "relation \"%s\" is not a partition", + // RelationGetRelationName(source_rel)); + + // /* Fetch partition dispatch info */ + // prel = get_pathman_relation_info(parent_relid); + // shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); + + // /* Execute partitioning expression */ + // econtext = CreateStandaloneExprContext(); + // old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + // expr_state = pathman_update_trigger_build_expr_state(prel, + // source_rel, + // new_tuple, + // &value_type); + // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + // MemoryContextSwitchTo(old_mcxt); + + // if (isnull) + // elog(ERROR, ERR_PART_ATTR_NULL); + + // if (itemIsDone != ExprSingleResult) + // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); + + // /* Search for matching partitions */ + // parts = find_partitions_for_value(value, value_type, prel, &nparts); + + + // /* We can free expression context now */ + // FreeExprContext(econtext, false); + + // if (nparts > 1) + // elog(ERROR, ERR_PART_ATTR_MULTIPLE); + // else if (nparts == 0) + // { + // target_relid = create_partitions_for_value(PrelParentRelid(prel), + // value, value_type); + + // /* get_pathman_relation_info() will refresh this entry */ + // invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + // } + // else target_relid = parts[0]; + + // pfree(parts); + target_relid = find_target_partition(source_rel, new_tuple); + + /* Convert tuple if target partition has changed */ + if (target_relid != source_relid) + { + Relation target_rel; + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE */ + + /* Lock partition and check if it exists */ + LockRelationOid(target_relid, lockmode); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) + /* TODO: !!! 
*/ + elog(ERROR, ERR_PART_ATTR_NO_PART, "()"); + // elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); + + /* Open partition */ + target_rel = heap_open(target_relid, lockmode); + + /* Move tuple from source relation to the selected partition */ + pathman_update_trigger_func_move_tuple(source_rel, target_rel, + old_tuple, new_tuple); + + /* Close partition */ + heap_close(target_rel, lockmode); + + /* We've made some changes */ + PG_RETURN_VOID(); + } + + /* Just return NEW tuple */ + PG_RETURN_POINTER(new_tuple); +} + +/* + * Find partition satisfying values of the tuple + */ +static Oid +find_target_partition(Relation source_rel, HeapTuple tuple) +{ + Oid source_relid, + target_relid, + parent_relid; + + source_relid = RelationGetRelid(source_rel); + parent_relid = find_topmost_parent(source_relid); + target_relid = find_deepest_partition(parent_relid, source_rel, tuple); + + return target_relid; +} + +static Oid +find_topmost_parent(Oid relid) +{ + Oid last; + PartParentSearch parent_search; + + last = relid; + + /* Iterate through parents until the topmost */ + while (1) + { + Oid parent = get_parent_of_partition(last, &parent_search); + + if (parent_search != PPS_ENTRY_PART_PARENT) + break; + last = parent; + } + + /* If relation doesn't have parent then just throw an error */ + if (last == relid) elog(ERROR, "relation \"%s\" is not a partition", - RelationGetRelationName(source_rel)); + get_rel_name(relid)); + + return last; +} + +/* + * Recursive search for the deepest partition satisfying the given tuple + */ +static Oid +find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple) +{ + const PartRelationInfo *prel; + Oid *parts; + int nparts; + + ExprContext *econtext; + ExprState *expr_state; + MemoryContext old_mcxt; + + Datum value; + Oid value_type; + bool isnull; + ExprDoneCond itemIsDone; + + Oid target_relid; + Oid subpartition; /* Fetch partition dispatch info */ - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); + prel = get_pathman_relation_info(parent); + if (!prel) + return InvalidOid; /* Execute partitioning expression */ econtext = CreateStandaloneExprContext(); old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); expr_state = pathman_update_trigger_build_expr_state(prel, source_rel, - new_tuple, + tuple, &value_type); value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); MemoryContextSwitchTo(old_mcxt); @@ -1170,37 +1313,16 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) /* get_pathman_relation_info() will refresh this entry */ invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); } - else target_relid = parts[0]; - + else + target_relid = parts[0]; pfree(parts); - /* Convert tuple if target partition has changed */ - if (target_relid != source_relid) - { - Relation target_rel; - LOCKMODE lockmode = RowExclusiveLock; /* UPDATE */ - - /* Lock partition and check if it exists */ - LockRelationOid(target_relid, lockmode); - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) - elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); - - /* Open partition */ - target_rel = heap_open(target_relid, lockmode); - - /* Move tuple from source relation to the selected partition */ - pathman_update_trigger_func_move_tuple(source_rel, target_rel, - old_tuple, new_tuple); - - /* Close partition */ - heap_close(target_rel, lockmode); - - /* We've made some changes */ - PG_RETURN_VOID(); - } + /* Try to go deeper recursively 
and see if there is subpartition */ + subpartition = find_deepest_partition(target_relid, source_rel, tuple); + if (OidIsValid(subpartition)) + return subpartition; - /* Just return NEW tuple */ - PG_RETURN_POINTER(new_tuple); + return target_relid; } struct replace_vars_cxt From 85823b2cabbdca59d8439268f14be77264410225 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 5 May 2017 17:58:30 +0300 Subject: [PATCH 028/528] add parent`s columns to column list for update trigger --- src/pl_funcs.c | 95 +++++++++++++++----------------------------------- 1 file changed, 29 insertions(+), 66 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b21dad7e..ef8e6550 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -113,6 +113,7 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple); +static void collect_update_trigger_columns(Oid relid, List **columns); static Oid find_target_partition(Relation source_rel, HeapTuple tuple); static Oid find_topmost_parent(Oid partition); static Oid find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple); @@ -1087,30 +1088,12 @@ Datum pathman_update_trigger_func(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; - Relation source_rel; - - // Oid parent_relid, Oid source_relid, target_relid; - HeapTuple old_tuple, new_tuple; - // Datum value; - // Oid value_type; - // bool isnull; - // ExprDoneCond itemIsDone; - - // Oid *parts; - // int nparts; - - // ExprContext *econtext; - // ExprState *expr_state; - // MemoryContext old_mcxt; - // PartParentSearch parent_search; - // const PartRelationInfo *prel; - /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "this function should not be called directly"); @@ -1132,54 +1115,11 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) old_tuple = trigdata->tg_trigtuple; new_tuple = trigdata->tg_newtuple; - // /* Find parent relation and partitioning info */ - // parent_relid = get_parent_of_partition(source_relid, &parent_search); - // if (parent_search != PPS_ENTRY_PART_PARENT) - // elog(ERROR, "relation \"%s\" is not a partition", - // RelationGetRelationName(source_rel)); - - // /* Fetch partition dispatch info */ - // prel = get_pathman_relation_info(parent_relid); - // shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - - // /* Execute partitioning expression */ - // econtext = CreateStandaloneExprContext(); - // old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); - // expr_state = pathman_update_trigger_build_expr_state(prel, - // source_rel, - // new_tuple, - // &value_type); - // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - // MemoryContextSwitchTo(old_mcxt); - - // if (isnull) - // elog(ERROR, ERR_PART_ATTR_NULL); - - // if (itemIsDone != ExprSingleResult) - // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); - - // /* Search for matching partitions */ - // parts = find_partitions_for_value(value, value_type, prel, &nparts); - - - // /* We can free expression context now */ - // FreeExprContext(econtext, false); - - // if (nparts > 1) - // elog(ERROR, ERR_PART_ATTR_MULTIPLE); - // else if (nparts == 0) - // { - // target_relid = create_partitions_for_value(PrelParentRelid(prel), - // value, value_type); - - // /* get_pathman_relation_info() will refresh this entry */ - // invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); - // } - // else target_relid = parts[0]; - - // pfree(parts); + /* Find (or create) target partition */ target_relid = 
find_target_partition(source_rel, new_tuple); + /* TODO: check for InvalidOid */ + /* Convert tuple if target partition has changed */ if (target_relid != source_relid) { @@ -1549,7 +1489,7 @@ create_update_triggers(PG_FUNCTION_ARGS) const char *trigname; const PartRelationInfo *prel; uint32 i; - List *columns; + List *columns = NIL; /* Check that table is partitioned */ prel = get_pathman_relation_info(parent); @@ -1559,7 +1499,8 @@ create_update_triggers(PG_FUNCTION_ARGS) trigname = build_update_trigger_name_internal(parent); /* Create trigger for parent */ - columns = PrelExpressionColumnNames(prel); + // columns = PrelExpressionColumnNames(prel); + collect_update_trigger_columns(parent, &columns); create_single_update_trigger_internal(parent, trigname, columns); /* Fetch children array */ @@ -1572,6 +1513,28 @@ create_update_triggers(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +static void +collect_update_trigger_columns(Oid relid, List **columns) +{ + const PartRelationInfo *prel; + Oid parent; + PartParentSearch parent_search; + + prel = get_pathman_relation_info(relid); + if (!prel) + return; + + /* Collect columns from current level */ + *columns = list_concat(*columns, PrelExpressionColumnNames(prel)); + + /* Collect columns from parent */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + return; + + collect_update_trigger_columns(parent, columns); +} + /* Create an UPDATE trigger for partition */ Datum create_single_update_trigger(PG_FUNCTION_ARGS) From 0f7f4beb8502cf03e961b3c962f46a4cbf370567 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 5 May 2017 18:49:59 +0300 Subject: [PATCH 029/528] Start working on test for fdw, add debug print for slots --- .gitignore | 1 + src/debug_print.c | 84 ++++++++++++++++++++ src/partition_filter.c | 6 +- src/partition_update.c | 8 +- tests/python/partitioning_test.py | 123 +++++++++++++++++++++++++++--- 5 files changed, 203 insertions(+), 19 deletions(-) diff --git a/.gitignore b/.gitignore index 9cf8da8f..55a84f78 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ regression.out *.gcda *.gcno *.gcov +*.log pg_pathman--*.sql tags cscope* diff --git a/src/debug_print.c b/src/debug_print.c index 36016861..d70aac51 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -8,12 +8,16 @@ * ------------------------------------------------------------------------ */ +#include #include "rangeset.h" #include "postgres.h" +#include "fmgr.h" +#include "executor/tuptable.h" #include "nodes/bitmapset.h" #include "nodes/pg_list.h" #include "lib/stringinfo.h" +#include "utils/lsyscache.h" /* @@ -99,3 +103,83 @@ irange_print(IndexRange irange) return str.data; } + + +/* ---------------- + * printatt + * ---------------- + */ +static char * +printatt(unsigned attributeId, + Form_pg_attribute attributeP, + char *value) +{ + return psprintf("\t%2d: %s%s%s%s\t(typeid = %u, len = %d, typmod = %d, byval = %c)\n", + attributeId, + NameStr(attributeP->attname), + value != NULL ? " = \"" : "", + value != NULL ? value : "", + value != NULL ? "\"" : "", + (unsigned int) (attributeP->atttypid), + attributeP->attlen, + attributeP->atttypmod, + attributeP->attbyval ? 
't' : 'f'); +} + +/* ---------------- + * debugtup - print one tuple for an interactive backend + * ---------------- + */ +static char * +debugtup(TupleTableSlot *slot) +{ + TupleDesc typeinfo = slot->tts_tupleDescriptor; + int natts = typeinfo->natts; + int i; + Datum attr; + char *value; + bool isnull; + Oid typoutput; + bool typisvarlena; + + int result_len = 0; + char *result = (char *) palloc(result_len + 1); + + for (i = 0; i < natts; ++i) + { + char *s; + int len; + + attr = slot_getattr(slot, i + 1, &isnull); + if (isnull) + continue; + getTypeOutputInfo(typeinfo->attrs[i]->atttypid, + &typoutput, &typisvarlena); + + value = OidOutputFunctionCall(typoutput, attr); + + s = printatt((unsigned) i + 1, typeinfo->attrs[i], value); + len = strlen(s); + result = (char *) repalloc(result, result_len + len + 1); + strncpy(result + result_len, s, len); + result_len += len; + } + + result[result_len] = '\0'; + return result; +} + +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +slot_print(TupleTableSlot *slot) +{ + if (TupIsNull(slot)) + return NULL; + + if (!slot->tts_tupleDescriptor) + return NULL; + + return debugtup(slot); +} diff --git a/src/partition_filter.c b/src/partition_filter.c index 9daf8251..874a064d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -657,6 +657,7 @@ partition_filter_exec(CustomScanState *node) EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; + ResultRelInfo *saved_resultRelInfo; /* clean ctid for old slot */ state->ctid = NULL; @@ -664,8 +665,9 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); /* Save original ResultRelInfo */ + saved_resultRelInfo = estate->es_result_relation_info; if (!state->result_parts.saved_rel_info) - state->result_parts.saved_rel_info = estate->es_result_relation_info; + state->result_parts.saved_rel_info = saved_resultRelInfo; if (!TupIsNull(slot)) { @@ -732,7 +734,7 @@ partition_filter_exec(CustomScanState *node) junkfilter = rri_holder->orig_junkFilter; Assert(junkfilter != NULL); - relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + relkind = saved_resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { bool isNull; diff --git a/src/partition_update.c b/src/partition_update.c index aaaa4555..74a05f84 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -157,10 +157,8 @@ partition_update_exec(CustomScanState *node) oldtuple = NULL; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) + if (child_state->ctid != NULL) { - Assert(child_state->ctid != NULL); - tupleid = child_state->ctid; tuple_ctid = *tupleid; /* be sure we don't free * ctid!! 
*/ @@ -192,7 +190,7 @@ partition_update_exec(CustomScanState *node) tupleid = NULL; } else - elog(ERROR, "PartitionUpdate supports only relations and foreign tables"); + elog(ERROR, "updates supported only on basic relations and foreign tables"); /* delete old tuple */ estate->es_result_relation_info = child_state->result_parts.saved_rel_info; @@ -266,7 +264,7 @@ ExecDeleteInternal(ItemPointer tupleid, tupleid, oldtuple); if (!dodelete) - elog(ERROR, "In partitioned tables the old row always should be deleted"); + elog(ERROR, "the old row always should be deleted from child table"); } if (resultRelInfo->ri_FdwRoutine) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 9dc404af..e1384945 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -19,6 +19,40 @@ from testgres import get_new_node, stop_all +# set setup base logging config, it can be turned on by `use_logging` +# parameter on node setup + +import logging +import logging.config + +logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') +LOG_CONFIG = { + 'version':1, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + 'file': { + 'class': 'logging.FileHandler', + 'filename': logfile, + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + }, + 'formatters': { + 'base_format': { + 'format': '%(node)-5s: %(message)s', + }, + }, + 'root': { + 'handlers': ('file', ), + 'level': 'DEBUG', + }, +} + +logging.config.dictConfig(LOG_CONFIG) # Helper function for json equality def ordered(obj, skip_keys=None): @@ -53,6 +87,14 @@ def setUp(self): def tearDown(self): stop_all() + def set_trace(self, con, external_command): + ''' this function starts gdb on selected connection ''' + + pid = con.execute('SELECT pg_backend_pid()')[0][0] + p = subprocess.Popen([external_command], stdin=subprocess.PIPE) + p.communicate(str.encode(str(pid))) + input("press ENTER to continue..") + def start_new_pathman_cluster(self, name='test', allows_streaming=False): node = get_new_node(name) node.init(allows_streaming=allows_streaming) @@ -368,12 +410,19 @@ def check_tablespace(node, tablename, tablespace): self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" + def make_basic_fdw_setup(self): + '''' + Create basic FDW setup: + - create range partitioned table in master + - create foreign server + - create foreign table and insert some data into it + - attach foreign table to partitioned one + + Do not forget to cleanup after use + ''' # Start master server - master = get_new_node('test') + master = get_new_node('test', use_logging=True) master.init() master.append_conf( 'postgresql.conf', @@ -382,13 +431,6 @@ def test_foreign_table(self): master.psql('postgres', 'create extension pg_pathman') master.psql('postgres', 'create extension postgres_fdw') - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions master.psql( 'postgres', '''create table abc(id serial, name text); @@ -425,6 +467,22 @@ def test_foreign_table(self): 'postgres', 'select attach_range_partition(\'abc\', \'ftable\', 20, 
30)') + return (master, fserv) + + @if_fdw_enabled + def test_foreign_table(self): + """Test foreign tables""" + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + + master, fserv = self.make_basic_fdw_setup() + # Check that table attached to partitioned table self.assertEqual( master.safe_psql('postgres', 'select * from ftable'), @@ -469,6 +527,48 @@ def test_foreign_table(self): ) master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') + fserv.cleanup() + master.cleanup() + + fserv.stop() + master.stop() + + def test_update_node_on_fdw_tables(self): + ''' Test update node on foreign tables ''' + + master, fserv = self.make_basic_fdw_setup() + + # create second foreign table + fserv.safe_psql('postgres', 'create table ftable2(id serial, name text)') + fserv.safe_psql('postgres', 'insert into ftable2 values (35, \'foreign\')') + + master.safe_psql( + 'postgres', + '''import foreign schema public limit to (ftable2) + from server fserv into public''' + ) + master.safe_psql( + 'postgres', + 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') + + master.safe_psql('postgres', + 'set pg_pathman.enable_partitionupdate=on') + + with master.connect() as con: + con.begin() + con.execute("set pg_pathman.enable_partitionupdate=on") + con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") + con.commit() + + self.set_trace(con, 'pg_debug') + import ipdb; ipdb.set_trace() + pass + + # cases + # - update from local to foreign + # - update from foreign to local + # - update from foreign to foreign + def test_parallel_nodes(self): """Test parallel queries under partitions""" @@ -1070,7 +1170,6 @@ def test_update_node_plan1(self): node.stop() node.cleanup() - if __name__ == "__main__": unittest.main() From 87224c9abb568c71496500658eaac66d48a124c6 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 10 May 2017 13:37:21 +0300 Subject: [PATCH 030/528] Fix UPDATEs from local table to foreign --- src/debug_print.c | 68 +++++++++++++++++++++++++++++++ src/include/partition_filter.h | 2 +- src/partition_filter.c | 31 +++++++------- tests/python/partitioning_test.py | 33 +++++++++------ 4 files changed, 106 insertions(+), 28 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index d70aac51..9734ca06 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -15,6 +15,7 @@ #include "fmgr.h" #include "executor/tuptable.h" #include "nodes/bitmapset.h" +#include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "lib/stringinfo.h" #include "utils/lsyscache.h" @@ -183,3 +184,70 @@ slot_print(TupleTableSlot *slot) return debugtup(slot); } + +/* + * rt_print + * return contents of range table + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +rt_print(const List *rtable) +{ +#define APPEND_STR(si, ...) 
\ +{ \ + char *line = psprintf(__VA_ARGS__); \ + appendStringInfo(&si, "%s", line); \ + pfree(line); \ +} + + const ListCell *l; + int i = 1; + + StringInfoData str; + + initStringInfo(&str); + APPEND_STR(str, "resno\trefname \trelid\tinFromCl\n"); + APPEND_STR(str, "-----\t---------\t-----\t--------\n"); + + foreach(l, rtable) + { + RangeTblEntry *rte = lfirst(l); + + switch (rte->rtekind) + { + case RTE_RELATION: + APPEND_STR(str, "%d\t%s\t%u\t%c", + i, rte->eref->aliasname, rte->relid, rte->relkind); + break; + case RTE_SUBQUERY: + APPEND_STR(str, "%d\t%s\t[subquery]", + i, rte->eref->aliasname); + break; + case RTE_JOIN: + APPEND_STR(str, "%d\t%s\t[join]", + i, rte->eref->aliasname); + break; + case RTE_FUNCTION: + APPEND_STR(str, "%d\t%s\t[rangefunction]", i, rte->eref->aliasname); + break; + case RTE_VALUES: + APPEND_STR(str, "%d\t%s\t[values list]", i, rte->eref->aliasname); + break; + case RTE_CTE: + APPEND_STR(str, "%d\t%s\t[cte]", i, rte->eref->aliasname); + break; + default: + elog(ERROR, "%d\t%s\t[unknown rtekind]", + i, rte->eref->aliasname); + } + + APPEND_STR(str, "\t%s\t%s\n", (rte->inh ? "inh" : ""), + (rte->inFromCl ? "inFromCl" : "")); + + i++; + } + return str.data; +#undef APPEND_STR +} diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 943d5d32..68c57aef 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,7 +40,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ - JunkFilter *orig_junkFilter; /* we keep original JunkFilter from + JunkFilter *updates_junkFilter; /* we keep junkfilter from scanned ResultRelInfo here */ } ResultRelInfoHolder; diff --git a/src/partition_filter.c b/src/partition_filter.c index 874a064d..e8c99af2 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -310,12 +310,23 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); + /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ + child_result_rel_info->ri_ConstraintExprs = NULL; + + /* Fill the ResultRelInfo holder */ + rri_holder->partid = partid; + rri_holder->result_rel_info = child_result_rel_info; + rri_holder->updates_junkFilter = NULL; + if (parts_storage->command_type == CMD_UPDATE) { char relkind; - JunkFilter *junkfilter = child_result_rel_info->ri_junkFilter; + JunkFilter *junkfilter = parts_storage->saved_rel_info->ri_junkFilter; - relkind = child_result_rel_info->ri_RelationDesc->rd_rel->relkind; + /* we don't need junk work in UPDATE */ + child_result_rel_info->ri_junkFilter = NULL; + + relkind = base_rel->rd_rel->relkind; if (relkind == RELKIND_RELATION) { junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "ctid"); @@ -333,19 +344,9 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) else elog(ERROR, "wrong type of relation"); + rri_holder->updates_junkFilter = junkfilter; } - /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ - child_result_rel_info->ri_ConstraintExprs = NULL; - - /* Fill the ResultRelInfo holder */ - rri_holder->partid = partid; - rri_holder->result_rel_info = child_result_rel_info; - rri_holder->orig_junkFilter = child_result_rel_info->ri_junkFilter; - - if (parts_storage->command_type == CMD_UPDATE) - child_result_rel_info->ri_junkFilter = NULL; - /* Generate tuple transformation 
map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); @@ -731,7 +732,7 @@ partition_filter_exec(CustomScanState *node) * we need this step because if there will be conversion * then junk attributes will be removed from slot */ - junkfilter = rri_holder->orig_junkFilter; + junkfilter = rri_holder->updates_junkFilter; Assert(junkfilter != NULL); relkind = saved_resultRelInfo->ri_RelationDesc->rd_rel->relkind; @@ -769,7 +770,7 @@ partition_filter_exec(CustomScanState *node) slot = state->tup_convert_slot; } else if (state->command_type == CMD_UPDATE) - slot = ExecFilterJunk(rri_holder->orig_junkFilter, slot); + slot = ExecFilterJunk(rri_holder->updates_junkFilter, slot); return slot; } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index e1384945..e102c332 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -88,7 +88,12 @@ def tearDown(self): stop_all() def set_trace(self, con, external_command): - ''' this function starts gdb on selected connection ''' + ''' this function is used to debug selected backend: + `self.set_trace(con, 'pg_debug')` where `pg_debug` is your + external script that expects pid of postgres backend + + !! don't forget to remove calls of this function after debug + ''' pid = con.execute('SELECT pg_backend_pid()')[0][0] p = subprocess.Popen([external_command], stdin=subprocess.PIPE) @@ -422,7 +427,7 @@ def make_basic_fdw_setup(self): ''' # Start master server - master = get_new_node('test', use_logging=True) + master = get_new_node('test') master.init() master.append_conf( 'postgresql.conf', @@ -556,18 +561,22 @@ def test_update_node_on_fdw_tables(self): with master.connect() as con: con.begin() - con.execute("set pg_pathman.enable_partitionupdate=on") - con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") + con.execute('set pg_pathman.enable_partitionupdate=on') + con.execute('insert into abc select i from generate_series(1, 19) i') con.commit() - self.set_trace(con, 'pg_debug') - import ipdb; ipdb.set_trace() - pass + source_relid = con.execute('select tableoid from abc where id=9')[0][0] + dest_relid = con.execute('select tableoid from abc where id=35')[0][0] + self.assertNotEqual(source_relid, dest_relid) + + # cases + # - update from local to foreign + # - update from foreign to local + # - update from foreign to foreign - # cases - # - update from local to foreign - # - update from foreign to local - # - update from foreign to foreign + con.execute('update abc set id=36 where id=9') + result_relid = con.execute('select tableoid from abc where id=35')[0][0] + self.assertEqual(result_relid, dest_relid) def test_parallel_nodes(self): """Test parallel queries under partitions""" @@ -1092,7 +1101,7 @@ def test_concurrent_detach(self): def test_update_node_plan1(self): ''' Test scan on all partititions when using update node. 
- We can't use regression tests here because 9.5 and 9.5 give + We can't use regression tests here because 9.5 and 9.6 give different plans ''' From c0cfde5182c1442396fc5bfbd569eee57fbe8700 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 10 May 2017 15:50:44 +0300 Subject: [PATCH 031/528] fix update trigger column list collecting function; refactoring --- src/partition_filter.c | 96 +++++++++++++++++++----------------------- src/pl_funcs.c | 31 +++++++------- 2 files changed, 59 insertions(+), 68 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index bca97afe..63c59282 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -66,7 +66,9 @@ int pg_pathman_insert_into_fdw = PF_FDW_INSERT_POSTGRES; CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; - +static ExprState *prepare_expr_state(Node *expr, + Oid relid, + EState *estate); static void prepare_rri_for_insert(EState *estate, ResultRelInfoHolder *rri_holder, const ResultPartsStorage *rps_storage, @@ -455,8 +457,6 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, } else selected_partid = parts[0]; - /* Replace parent table with a suitable partition */ - /* TODO: write a correct comment */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); @@ -471,35 +471,13 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, /* Build an expression state if not yet */ if (!rri_holder->expr_state) - { - MemoryContext tmp_mcxt; - Node *expr; - Index varno = 1; - ListCell *lc; - - /* Change varno in Vars according to range table */ - expr = copyObject(subprel->expr); - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == selected_partid) - { - if (varno > 1) - ChangeVarNodes(expr, 1, varno, 0); - break; - } - varno += 1; - } - - /* Prepare state for expression execution */ - tmp_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - rri_holder->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(tmp_mcxt); - } + rri_holder->expr_state = prepare_expr_state(subprel->expr, + selected_partid, + estate); Assert(rri_holder->expr_state != NULL); - /* Dive in */ + /* Recursively search for subpartitions */ rri_holder = select_partition_for_insert(econtext, rri_holder->expr_state, subprel, parts_storage, @@ -516,6 +494,38 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, return rri_holder; } +static ExprState * +prepare_expr_state(Node *expr, + Oid relid, + EState *estate) +{ + ExprState *expr_state; + MemoryContext old_mcxt; + Index varno = 1; + Node *expr_copy; + ListCell *lc; + + /* Change varno in Vars according to range table */ + expr_copy = copyObject(expr); + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == relid) + { + if (varno > 1) + ChangeVarNodes(expr_copy, 1, varno, 0); + break; + } + varno += 1; + } + + /* Prepare state for expression execution */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + expr_state = ExecInitExpr((Expr *) expr_copy, NULL); + MemoryContextSwitchTo(old_mcxt); + + return expr_state; +} /* * -------------------------------- @@ -596,40 +606,22 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - Index varno = 1; - Node *expr; - MemoryContext old_mcxt; PartitionFilterState *state = 
(PartitionFilterState *) node; - const PartRelationInfo *prel; - ListCell *lc; /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); if (state->expr_state == NULL) { + const PartRelationInfo *prel; + /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); Assert(prel != NULL); - /* Change varno in Vars according to range table */ - expr = copyObject(prel->expr); - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == state->partitioned_table) - { - if (varno > 1) - ChangeVarNodes(expr, 1, varno, 0); - break; - } - varno += 1; - } - - /* Prepare state for expression execution */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - state->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(old_mcxt); + state->expr_state = prepare_expr_state(prel->expr, + state->partitioned_table, + estate); } /* Init ResultRelInfo cache */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ef8e6550..982292e5 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1118,8 +1118,6 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) /* Find (or create) target partition */ target_relid = find_target_partition(source_rel, new_tuple); - /* TODO: check for InvalidOid */ - /* Convert tuple if target partition has changed */ if (target_relid != source_relid) { @@ -1129,9 +1127,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) /* Lock partition and check if it exists */ LockRelationOid(target_relid, lockmode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) - /* TODO: !!! */ - elog(ERROR, ERR_PART_ATTR_NO_PART, "()"); - // elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); + elog(ERROR, "no suitable target partition"); /* Open partition */ target_rel = heap_open(target_relid, lockmode); @@ -1152,7 +1148,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) } /* - * Find partition satisfying values of the tuple + * Find partition satisfying values of the tuple or return InvalidOid */ static Oid find_target_partition(Relation source_rel, HeapTuple tuple) @@ -1247,20 +1243,24 @@ find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple) elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - target_relid = create_partitions_for_value(PrelParentRelid(prel), + /* No partition found, create a new one */ + target_relid = create_partitions_for_value(PrelParentRelid(prel), value, value_type); - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + /* Get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); } else + { + /* Found partition */ target_relid = parts[0]; - pfree(parts); - /* Try to go deeper recursively and see if there is subpartition */ - subpartition = find_deepest_partition(target_relid, source_rel, tuple); - if (OidIsValid(subpartition)) - return subpartition; + /* Try to go deeper recursively and see if there is subpartition */ + subpartition = find_deepest_partition(target_relid, source_rel, tuple); + if (OidIsValid(subpartition)) + return subpartition; + } + pfree(parts); return target_relid; } @@ -1499,7 +1499,6 @@ create_update_triggers(PG_FUNCTION_ARGS) trigname = build_update_trigger_name_internal(parent); /* Create trigger for parent */ - // columns = PrelExpressionColumnNames(prel); 
collect_update_trigger_columns(parent, &columns); create_single_update_trigger_internal(parent, trigname, columns); @@ -1525,7 +1524,7 @@ collect_update_trigger_columns(Oid relid, List **columns) return; /* Collect columns from current level */ - *columns = list_concat(*columns, PrelExpressionColumnNames(prel)); + *columns = list_union(*columns, PrelExpressionColumnNames(prel)); /* Collect columns from parent */ parent = get_parent_of_partition(relid, &parent_search); From be140b9ac18a03dfd5c91261001fe8b5a7b783ab Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 2017 11:51:52 +0300 Subject: [PATCH 032/528] Add NOP trigger for FDW updates --- hash.sql | 4 ++ init.sql | 24 ++++++++++++ range.sql | 6 ++- src/include/init.h | 1 + src/include/partition_creation.h | 5 +++ src/init.c | 11 ++++++ src/partition_creation.c | 65 +++++++++++++++++++++++++------ src/partition_update.c | 13 +++++++ src/pl_funcs.c | 65 +++++++++++++++++++++++++++++++ tests/python/partitioning_test.py | 5 +++ 10 files changed, 187 insertions(+), 12 deletions(-) diff --git a/hash.sql b/hash.sql index 4c21f9df..b0eff3b8 100644 --- a/hash.sql +++ b/hash.sql @@ -138,6 +138,10 @@ BEGIN @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); + IF @extschema@.is_relation_foreign(partition_relid) THEN + PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); + END IF; + /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) SELECT init_callback diff --git a/init.sql b/init.sql index bb6da5bc..1ea4355b 100644 --- a/init.sql +++ b/init.sql @@ -737,6 +737,22 @@ CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' LANGUAGE C STRICT; +/* + * Function for NOP triggers. + * NOP trigger is a trigger that we use to turn off direct modify of FDW tables + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_nop_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_nop_trigger_func' +LANGUAGE C STRICT; + +/* + * Creates single NOP trigger. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_single_nop_trigger( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_single_nop_trigger' +LANGUAGE C STRICT; /* * Partitioning key @@ -930,3 +946,11 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' LANGUAGE C STRICT; + +/* + * Check if relation is foreign table + */ +CREATE OR REPLACE FUNCTION @extschema@.is_relation_foreign( + relid REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_relation_foreign' +LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index 371a9f83..009f11f1 100644 --- a/range.sql +++ b/range.sql @@ -948,10 +948,14 @@ BEGIN INTO v_init_callback; /* If update trigger is enabled then create one for this partition */ - if @extschema@.has_update_trigger(parent_relid) THEN + IF @extschema@.has_update_trigger(parent_relid) THEN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; + IF @extschema@.is_relation_foreign(partition_relid) THEN + PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); + END IF; + /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, partition_relid, diff --git a/src/include/init.h b/src/include/init.h index 778da9bb..769bf119 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -199,6 +199,7 @@ char *build_sequence_name_internal(Oid relid); char *build_update_trigger_name_internal(Oid relid); char *build_update_trigger_func_name_internal(Oid relid); +char *build_nop_trigger_name_internal(Oid relid); bool pathman_config_contains_relation(Oid relid, Datum *values, diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 42454ca9..106054c9 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -86,6 +86,11 @@ void create_single_update_trigger_internal(Oid partition_relid, bool has_update_trigger_internal(Oid parent); +/* NOP triggers */ +void create_single_nop_trigger_internal(Oid relid, + const char *trigname, + List *columns); + /* Partitioning callback type */ typedef enum { diff --git a/src/init.c b/src/init.c index 0333d263..25ce724c 100644 --- a/src/init.c +++ b/src/init.c @@ -583,6 +583,17 @@ build_update_trigger_name_internal(Oid relid) return psprintf("%s_upd_trig", get_rel_name(relid)); } +/* + * Generate name for NOP trigger. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_nop_trigger_name_internal(Oid relid) +{ + AssertArg(OidIsValid(relid)); + return psprintf("%s_nop_trig", get_rel_name(relid)); +} + /* * Generate name for update trigger's function. * NOTE: this function does not perform sanity checks at all. 
diff --git a/src/partition_creation.c b/src/partition_creation.c index 7707bec1..5f237575 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -84,6 +84,8 @@ static Node *build_partitioning_expression(Oid parent_relid, Oid *expr_type, List **columns); +static bool has_trigger_internal(Oid relid, const char *trigname); + /* * --------------------------------------- * Public interface (partition creation) @@ -215,7 +217,8 @@ create_single_partition_common(Oid parent_relid, init_callback_params *callback_params, List *trigger_columns) { - Relation child_relation; + Relation child_relation; + const char *trigger_name; /* Open the relation and add new check constraint & fkeys */ child_relation = heap_open(partition_relid, AccessExclusiveLock); @@ -230,8 +233,6 @@ create_single_partition_common(Oid parent_relid, /* Create trigger if needed */ if (has_update_trigger_internal(parent_relid)) { - const char *trigger_name; - trigger_name = build_update_trigger_name_internal(parent_relid); create_single_update_trigger_internal(partition_relid, trigger_name, @@ -1782,26 +1783,22 @@ create_single_update_trigger_internal(Oid partition_relid, InvalidOid, InvalidOid, false); } -/* Check if relation has pg_pathman's update trigger */ -bool -has_update_trigger_internal(Oid parent_relid) +/* Check if relation has some trigger */ +static bool +has_trigger_internal(Oid relid, const char *trigname) { bool res = false; Relation tgrel; SysScanDesc scan; ScanKeyData key[1]; HeapTuple tuple; - const char *trigname; - - /* Build update trigger's name */ - trigname = build_update_trigger_name_internal(parent_relid); tgrel = heap_open(TriggerRelationId, RowExclusiveLock); ScanKeyInit(&key[0], Anum_pg_trigger_tgrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(parent_relid)); + ObjectIdGetDatum(relid)); scan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, NULL, lengthof(key), key); @@ -1822,3 +1819,49 @@ has_update_trigger_internal(Oid parent_relid) return res; } + +/* Check if relation has pg_pathman's update trigger */ +bool +has_update_trigger_internal(Oid parent_relid) +{ + const char *trigname; + + /* Build update trigger's name */ + trigname = build_update_trigger_name_internal(parent_relid); + return has_trigger_internal(parent_relid, trigname); +} + +/* Create trigger for partition that does nothing */ +void +create_single_nop_trigger_internal(Oid relid, + const char *trigname, + List *columns) +{ + CreateTrigStmt *stmt; + List *func; + + /* do nothing if relation has trigger already */ + if (has_trigger_internal(relid, trigname)) + return; + + func = list_make2(makeString(get_namespace_name(get_pathman_schema())), + makeString(CppAsString(pathman_nop_trigger_func))); + + stmt = makeNode(CreateTrigStmt); + stmt->trigname = (char *) trigname; + stmt->relation = makeRangeVarFromRelid(relid); + stmt->funcname = func; + stmt->args = NIL; + stmt->row = true; + stmt->timing = TRIGGER_TYPE_BEFORE; + stmt->events = TRIGGER_TYPE_UPDATE; + stmt->columns = columns; + stmt->whenClause = NULL; + stmt->isconstraint = false; + stmt->deferrable = false; + stmt->initdeferred = false; + stmt->constrrel = NULL; + + (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, + InvalidOid, InvalidOid, false); +} diff --git a/src/partition_update.c b/src/partition_update.c index 74a05f84..fe6ddc64 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -60,6 +60,16 @@ init_partition_update_static_data(void) NULL); } +/* + * By default UPDATE queries will make ForeignUpdate nodes for 
foreign tables. + * This function modifies these nodes so they will work as SELECTs + */ +static void +modify_fdw_scan(ForeignScan *node) +{ + node->scan.plan.plan_node_id = CMD_SELECT; + node->operation = CMD_SELECT; +} Plan * make_partition_update(Plan *subplan, @@ -76,6 +86,9 @@ make_partition_update(Plan *subplan, cscan->scan.plan.plan_rows = subplan->plan_rows; cscan->scan.plan.plan_width = subplan->plan_width; + if (IsA(subplan, ForeignScan)) + modify_fdw_scan((ForeignScan *) subplan); + /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index e57832f7..f7c17f3d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -71,8 +71,12 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( create_update_triggers ); PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); +PG_FUNCTION_INFO_V1( pathman_nop_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); +PG_FUNCTION_INFO_V1( is_relation_foreign ); + +PG_FUNCTION_INFO_V1( create_single_nop_trigger ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); @@ -1213,6 +1217,24 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) PG_RETURN_POINTER(new_tuple); } +Datum +pathman_nop_trigger_func(PG_FUNCTION_ARGS) +{ + TriggerData *trigdata = (TriggerData *) fcinfo->context; + + /* Handle user calls */ + if (!CALLED_AS_TRIGGER(fcinfo)) + elog(ERROR, "this function should not be called directly"); + + /* Handle wrong fire mode */ + if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) + elog(ERROR, "%s: must be fired for row", + trigdata->tg_trigger->tgname); + + /* Just return NEW tuple */ + PG_RETURN_POINTER(trigdata->tg_newtuple); +} + struct replace_vars_cxt { HeapTuple new_tuple; @@ -1456,6 +1478,49 @@ has_update_trigger(PG_FUNCTION_ARGS) PG_RETURN_BOOL(has_update_trigger_internal(parent_relid)); } +/* Check if relation is foreign table */ +Datum +is_relation_foreign(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + Relation rel; + bool res; + + /* Check that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); + + rel = heap_open(relid, NoLock); + res = (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE); + heap_close(rel, NoLock); + PG_RETURN_BOOL(res); +} + +/* Create a trigger for partition that does nothing */ +Datum +create_single_nop_trigger(PG_FUNCTION_ARGS) +{ + Oid parent = PG_GETARG_OID(0); + Oid child = PG_GETARG_OID(1); + const char *trigname; + const PartRelationInfo *prel; + List *columns; + + /* Check that table is partitioned */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_ANY); + + /* Acquire trigger and attribute names */ + trigname = build_nop_trigger_name_internal(parent); + + /* Generate list of columns used in expression */ + columns = PrelExpressionColumnNames(prel); + create_single_nop_trigger_internal(child, trigname, columns); + + PG_RETURN_VOID(); +} + /* * ------- diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index e102c332..0a5bef57 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -538,6 +538,7 @@ def test_foreign_table(self): fserv.stop() master.stop() + @if_fdw_enabled def test_update_node_on_fdw_tables(self): ''' Test 
update node on foreign tables ''' @@ -578,6 +579,10 @@ def test_update_node_on_fdw_tables(self): result_relid = con.execute('select tableoid from abc where id=35')[0][0] self.assertEqual(result_relid, dest_relid) + self.set_trace(con, 'pg_debug') + import ipdb; ipdb.set_trace() + pass + def test_parallel_nodes(self): """Test parallel queries under partitions""" From 55e0fed889231298927108d734113b4041e1598e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 2017 13:58:43 +0300 Subject: [PATCH 033/528] Move junk cleaning code to PartitionUpdate --- hash.sql | 4 +- src/include/partition_filter.h | 6 +- src/partition_filter.c | 39 ++-------- src/partition_update.c | 115 +++++++++++++++++------------- tests/python/partitioning_test.py | 12 ++-- 5 files changed, 84 insertions(+), 92 deletions(-) diff --git a/hash.sql b/hash.sql index b0eff3b8..677239b6 100644 --- a/hash.sql +++ b/hash.sql @@ -138,8 +138,8 @@ BEGIN @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); - IF @extschema@.is_relation_foreign(partition_relid) THEN - PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); + IF @extschema@.is_relation_foreign(new_partition) THEN + PERFORM @extschema@.create_single_nop_trigger(parent_relid, new_partition); END IF; /* Fetch init_callback from 'params' table */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 68c57aef..1519a246 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -97,11 +97,11 @@ typedef struct bool warning_triggered; /* warning message counter */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ItemPointer ctid; /* ctid of scanned tuple - if there any, or NULL, - filled when command_type == CMD_UPDATE*/ CmdType command_type; + TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ + JunkFilter *src_junkFilter; /* junkfilter for subplan_slot */ + ExprContext *tup_convert_econtext; /* ExprContext for projections */ ExprState *expr_state; /* for partitioning expression */ } PartitionFilterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 88c470a3..5def7e1f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -656,16 +656,18 @@ partition_filter_exec(CustomScanState *node) TupleTableSlot *slot; ResultRelInfo *saved_resultRelInfo; - /* clean ctid for old slot */ - state->ctid = NULL; - slot = ExecProcNode(child_ps); + state->subplan_slot = slot; + state->src_junkFilter = NULL; /* Save original ResultRelInfo */ saved_resultRelInfo = estate->es_result_relation_info; if (!state->result_parts.saved_rel_info) state->result_parts.saved_rel_info = saved_resultRelInfo; + if (state->tup_convert_slot) + ExecClearTuple(state->tup_convert_slot); + if (!TupIsNull(slot)) { MemoryContext old_mcxt; @@ -717,33 +719,8 @@ partition_filter_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = resultRelInfo; - if (state->command_type == CMD_UPDATE) - { - JunkFilter *junkfilter; - Datum datum; - char relkind; - - /* - * extract `ctid` junk attribute and save it in state, - * we need this step because if there will be conversion - * then junk attributes will be removed from slot - */ - junkfilter = rri_holder->updates_junkFilter; - Assert(junkfilter != NULL); - - relkind = saved_resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) - { - bool isNull; - - datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, 
&isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - state->ctid = (ItemPointer) DatumGetPointer(datum); - } - } + /* pass junkfilter to upper node */ + state->src_junkFilter = rri_holder->updates_junkFilter; /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) @@ -765,8 +742,6 @@ partition_filter_exec(CustomScanState *node) /* Now replace the original slot */ slot = state->tup_convert_slot; } - else if (state->command_type == CMD_UPDATE) - slot = ExecFilterJunk(rri_holder->updates_junkFilter, slot); return slot; } diff --git a/src/partition_update.c b/src/partition_update.c index fe6ddc64..e925ac6d 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -151,72 +151,89 @@ partition_update_exec(CustomScanState *node) if (!TupIsNull(slot)) { - Datum datum; - bool isNull; - char relkind; - ResultRelInfo *resultRelInfo; - ItemPointer tupleid; - ItemPointerData tuple_ctid; - EPQState epqstate; - HeapTupleData oldtupdata; - HeapTuple oldtuple; - - PartitionFilterState *child_state = (PartitionFilterState *) child_ps; + Datum datum; + bool isNull; + char relkind; + ResultRelInfo *resultRelInfo, + *sourceRelInfo; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + EPQState epqstate; + HeapTupleData oldtupdata; + HeapTuple oldtuple = NULL; + PartitionFilterState *child_state; + JunkFilter *junkfilter; + + child_state = (PartitionFilterState *) child_ps; Assert(child_state->command_type == CMD_UPDATE); - EvalPlanQualSetSlot(&epqstate, slot); + EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); + sourceRelInfo = child_state->result_parts.saved_rel_info; resultRelInfo = estate->es_result_relation_info; - oldtuple = NULL; - relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + junkfilter = child_state->src_junkFilter; - if (child_state->ctid != NULL) + if (junkfilter != NULL) { - tupleid = child_state->ctid; - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ - tupleid = &tuple_ctid; - } - else if (relkind == RELKIND_FOREIGN_TABLE) - { - JunkFilter *junkfilter = resultRelInfo->ri_junkFilter; - - if (junkfilter != NULL && AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) { - datum = ExecGetJunkAttribute(slot, - junkfilter->jf_junkAttNo, - &isNull); + bool isNull; + + datum = ExecGetJunkAttribute(child_state->subplan_slot, + junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... */ if (isNull) - elog(ERROR, "wholerow is NULL"); + elog(ERROR, "ctid is NULL"); - oldtupdata.t_data = DatumGetHeapTupleHeader(datum); - oldtupdata.t_len = - HeapTupleHeaderGetDatumLength(oldtupdata.t_data); - ItemPointerSetInvalid(&(oldtupdata.t_self)); - - /* Historically, view triggers see invalid t_tableOid. */ - oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); - oldtuple = &oldtupdata; + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! */ + tupleid = &tuple_ctid; } - - tupleid = NULL; + else if (relkind == RELKIND_FOREIGN_TABLE) + { + if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + { + datum = ExecGetJunkAttribute(child_state->subplan_slot, + junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ + if (isNull) + elog(ERROR, "wholerow is NULL"); + + oldtupdata.t_data = DatumGetHeapTupleHeader(datum); + oldtupdata.t_len = + HeapTupleHeaderGetDatumLength(oldtupdata.t_data); + ItemPointerSetInvalid(&(oldtupdata.t_self)); + + /* Historically, view triggers see invalid t_tableOid. */ + oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + oldtuple = &oldtupdata; + } + } + else + elog(ERROR, "got unexpected type of relation"); + + /* + * Clean from junk attributes before INSERT, + * but only if slot wasn't converted in PartitionFilter + */ + if (TupIsNull(child_state->tup_convert_slot)) + slot = ExecFilterJunk(junkfilter, slot); } - else - elog(ERROR, "updates supported only on basic relations and foreign tables"); - - /* delete old tuple */ - estate->es_result_relation_info = child_state->result_parts.saved_rel_info; /* - * We have two cases here: - * normal relations - tupleid points to actual tuple - * foreign tables - tupleid is invalid, slot is required + * Delete old tuple. We have two cases here: + * 1) local tables - tupleid points to actual tuple + * 2) foreign tables - tupleid is invalid, slot is required */ - ExecDeleteInternal(tupleid, oldtuple, slot, &epqstate, estate); - estate->es_result_relation_info = resultRelInfo; + estate->es_result_relation_info = sourceRelInfo; + ExecDeleteInternal(tupleid, oldtuple, child_state->subplan_slot, + &epqstate, estate); /* we've got the slot that can be inserted to child partition */ + estate->es_result_relation_info = resultRelInfo; return slot; } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0a5bef57..9bda06d9 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -557,12 +557,12 @@ def test_update_node_on_fdw_tables(self): 'postgres', 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') - master.safe_psql('postgres', - 'set pg_pathman.enable_partitionupdate=on') + #master.safe_psql('postgres', + # 'set pg_pathman.enable_partitionupdate=on') with master.connect() as con: con.begin() - con.execute('set pg_pathman.enable_partitionupdate=on') + #con.execute('set pg_pathman.enable_partitionupdate=on') con.execute('insert into abc select i from generate_series(1, 19) i') con.commit() @@ -575,9 +575,9 @@ def test_update_node_on_fdw_tables(self): # - update from foreign to local # - update from foreign to foreign - con.execute('update abc set id=36 where id=9') - result_relid = con.execute('select tableoid from abc where id=35')[0][0] - self.assertEqual(result_relid, dest_relid) + #con.execute('update abc set id=36 where id=9') + #result_relid = con.execute('select tableoid from abc where id=35')[0][0] + #self.assertEqual(result_relid, dest_relid) self.set_trace(con, 'pg_debug') import ipdb; ipdb.set_trace() From 9c167fef34157824d6fe70e0ef8e9becbd28d0a8 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 11 May 2017 18:41:52 +0300 Subject: [PATCH 034/528] rewrite drop_triggers() in C and adapted it to multilevel partitioning --- init.sql | 34 +-------- src/include/partition_creation.h | 3 + src/include/utils.h | 11 +-- src/partition_creation.c | 15 +++- src/partition_filter.c | 3 +- src/pl_funcs.c | 127 +++++++++++++++++++++++++++---- src/pl_range_funcs.c | 14 ---- src/utils.c | 13 ++++ 8 files changed, 156 insertions(+), 64 deletions(-) diff --git a/init.sql b/init.sql index e27b533e..59a563d5 100644 --- a/init.sql +++ b/init.sql @@ -534,35 +534,8 @@ LANGUAGE plpgsql; */ CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( 
parent_relid REGCLASS) -RETURNS VOID AS -$$ -DECLARE - triggername TEXT; - rec RECORD; - -BEGIN - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Drop trigger for each partition if exists */ - FOR rec IN (SELECT pg_catalog.pg_inherits.* FROM pg_catalog.pg_inherits - JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid - WHERE inhparent = parent_relid AND tgname = triggername) - LOOP - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - rec.inhrelid::REGCLASS::TEXT); - END LOOP; - - /* Drop trigger on parent */ - IF EXISTS (SELECT * FROM pg_catalog.pg_trigger - WHERE tgname = triggername AND tgrelid = parent_relid) - THEN - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - parent_relid::TEXT); - END IF; -END -$$ LANGUAGE plpgsql STRICT; +RETURNS VOID AS 'pg_pathman', 'drop_update_triggers' +LANGUAGE C STRICT; /* * Drop partitions. If delete_data set to TRUE, partitions @@ -767,7 +740,8 @@ LANGUAGE C STRICT; * Get parent of pg_pathman's partition. */ CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( - partition_relid REGCLASS) + partition_relid REGCLASS, + raise_error BOOL DEFAULT TRUE) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index a194c165..df48fee7 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -86,6 +86,9 @@ void create_single_update_trigger_internal(Oid partition_relid, bool has_update_trigger_internal(Oid parent); +void drop_single_update_trigger_internal(Oid relid, + const char *trigname); + /* Partitioning callback type */ typedef enum { diff --git a/src/include/utils.h b/src/include/utils.h index 16100df7..bcff887c 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -30,13 +30,14 @@ bool match_expr_to_operand(Node *expr, Node *operand); * Misc. */ Oid get_pathman_schema(void); -List * list_reverse(List *l); +List *list_reverse(List *l); /* * Useful functions for relations. */ Oid get_rel_owner(Oid relid); -char * get_rel_name_or_relid(Oid relid); +char *get_rel_name_or_relid(Oid relid); +char *get_qualified_rel_name(Oid relid); RangeVar *makeRangeVarFromRelid(Oid relid); /* @@ -52,13 +53,13 @@ void extract_op_func_and_ret_type(char *opname, /* * Print values and cast types. 
*/ -char * datum_to_cstring(Datum datum, Oid typid); +char *datum_to_cstring(Datum datum, Oid typid); Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); Datum extract_binary_interval_from_text(Datum interval_text, Oid part_atttype, Oid *interval_type); -char ** deconstruct_text_array(Datum array, int *array_size); -RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); +char **deconstruct_text_array(Datum array, int *array_size); +RangeVar **qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); #endif /* PATHMAN_UTILS_H */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 4bb54b2e..991f852e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1741,7 +1741,7 @@ build_partitioning_expression(Oid parent_relid, /* * ------------------------- - * Update trigger creation + * Update triggers management * ------------------------- */ @@ -1774,6 +1774,8 @@ create_single_update_trigger_internal(Oid partition_relid, (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, InvalidOid, InvalidOid, false); + + CommandCounterIncrement(); } /* Check if relation has pg_pathman's update trigger */ @@ -1816,3 +1818,14 @@ has_update_trigger_internal(Oid parent_relid) return res; } + +void +drop_single_update_trigger_internal(Oid relid, + const char *trigname) +{ + Oid trigoid; + + trigoid = get_trigger_oid(relid, trigname, true); + if (OidIsValid(trigoid)) + RemoveTriggerById(trigoid); +} diff --git a/src/partition_filter.c b/src/partition_filter.c index 63c59282..b2978eea 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -478,7 +478,8 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, Assert(rri_holder->expr_state != NULL); /* Recursively search for subpartitions */ - rri_holder = select_partition_for_insert(econtext, rri_holder->expr_state, + rri_holder = select_partition_for_insert(econtext, + rri_holder->expr_state, subprel, parts_storage, estate); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 982292e5..80858fbb 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -70,6 +70,7 @@ PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( create_update_triggers ); +PG_FUNCTION_INFO_V1( drop_update_triggers ); PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); @@ -113,6 +114,9 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple); +static void create_update_triggers_internal(Oid relid); +static void drop_update_triggers_internal(Oid relid); + static void collect_update_trigger_columns(Oid relid, List **columns); static Oid find_target_partition(Relation source_rel, HeapTuple tuple); static Oid find_topmost_parent(Oid partition); @@ -150,6 +154,7 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) Oid partition = PG_GETARG_OID(0); PartParentSearch parent_search; Oid parent; + bool emit_error = PG_GETARG_BOOL(1); /* Fetch parent & write down search status */ parent = get_parent_of_partition(partition, &parent_search); @@ -160,14 +165,13 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) /* It must be parent known by pg_pathman */ if (parent_search == PPS_ENTRY_PART_PARENT) PG_RETURN_OID(parent); - else - { + + if (emit_error) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"%s\" is not a partition", 
get_rel_name_or_relid(partition)))); - PG_RETURN_NULL(); - } + PG_RETURN_NULL(); } /* @@ -1480,11 +1484,41 @@ pathman_update_trigger_func_move_tuple(Relation source_rel, FreeTupleDesc(target_tupdesc); } -/* Create UPDATE triggers for all partitions */ +/* + * Create UPDATE triggers for all partitions and subpartitions + */ Datum create_update_triggers(PG_FUNCTION_ARGS) { - Oid parent = PG_GETARG_OID(0); + Oid relid = PG_GETARG_OID(0), + parent; + PartParentSearch parent_search; + + /* + * If table has parent then we should check that parent has update trigger. + * In the ideal world this error should never be thrown since we create and + * drop update triggers for the whole partitions tree and not its parts + */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search == PPS_ENTRY_PART_PARENT) + if(!has_update_trigger_internal(parent)) + ereport(ERROR, + (errmsg("Parent table must have an update trigger"), + errhint("Try to perform SELECT %s.create_update_triggers('%s');", + get_namespace_name(get_pathman_schema()), + get_qualified_rel_name(parent)))); + + /* Recursively add triggers */ + create_update_triggers_internal(relid); + PG_RETURN_VOID(); +} + +/* + * Create UPDATE triggers recursively + */ +static void +create_update_triggers_internal(Oid relid) +{ Oid *children; const char *trigname; const PartRelationInfo *prel; @@ -1492,24 +1526,30 @@ create_update_triggers(PG_FUNCTION_ARGS) List *columns = NIL; /* Check that table is partitioned */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_ANY); + prel = get_pathman_relation_info(relid); + /* TODO: check this only for topmost relid? */ + // shout_if_prel_is_invalid(relid, prel, PT_ANY); + if (!prel) + return; - /* Acquire trigger and attribute names */ - trigname = build_update_trigger_name_internal(parent); + /* Acquire trigger name */ + trigname = build_update_trigger_name_internal(relid); /* Create trigger for parent */ - collect_update_trigger_columns(parent, &columns); - create_single_update_trigger_internal(parent, trigname, columns); + collect_update_trigger_columns(relid, &columns); + create_single_update_trigger_internal(relid, trigname, columns); /* Fetch children array */ children = PrelGetChildrenArray(prel); /* Create triggers for each partition */ for (i = 0; i < PrelChildrenCount(prel); i++) + { create_single_update_trigger_internal(children[i], trigname, columns); - PG_RETURN_VOID(); + /* Perform the same procedure on subpartitions */ + create_update_triggers_internal(children[i]); + } } static void @@ -1558,6 +1598,67 @@ create_single_update_trigger(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* + * Drop UPDATE triggers for all partitions and subpartitions + */ +Datum +drop_update_triggers(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0), + parent; + PartParentSearch parent_search; + + /* + * We can drop triggers only if relid is the topmost parent table (or if + * its parent doesn't have update triggers (which should never happen in + * the ideal world) + */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search == PPS_ENTRY_PART_PARENT) + if(has_update_trigger_internal(parent)) + ereport(ERROR, + (errmsg("Parent table must not have an update trigger"), + errhint("Try to perform SELECT %s.drop_triggers('%s');", + get_namespace_name(get_pathman_schema()), + get_qualified_rel_name(parent)))); + + /* Recursively drop triggers */ + drop_update_triggers_internal(relid); + PG_RETURN_VOID(); +} + +static void +drop_update_triggers_internal(Oid 
relid) +{ + Oid *children; + const char *trigname; + const PartRelationInfo *prel; + uint32 i; + + prel = get_pathman_relation_info(relid); + if (!prel) + return; + + /* Acquire trigger name */ + trigname = build_update_trigger_name_internal(relid); + + /* Fetch children array */ + children = PrelGetChildrenArray(prel); + + /* Drop triggers on partitions */ + for (i = 0; i < PrelChildrenCount(prel); i++) + { + drop_single_update_trigger_internal(children[i], trigname); + + /* Recursively drop triggers on subpartitions */ + drop_update_triggers_internal(children[i]); + + } + + /* Drop trigger on parent */ + drop_single_update_trigger_internal(relid, trigname); +} + /* Check if relation has pg_pathman's update trigger */ Datum has_update_trigger(PG_FUNCTION_ARGS) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index fff7c76d..a4570b80 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -64,7 +64,6 @@ static void modify_range_constraint(Oid partition_relid, Oid expression_type, const Bound *lower, const Bound *upper); -static char *get_qualified_rel_name(Oid relid); static void drop_table_by_oid(Oid relid); static bool interval_is_trivial(Oid atttype, Datum interval, @@ -1156,19 +1155,6 @@ check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) } } -/* - * Return palloced fully qualified relation name as a cstring - */ -static char * -get_qualified_rel_name(Oid relid) -{ - Oid nspid = get_rel_namespace(relid); - - return psprintf("%s.%s", - quote_identifier(get_namespace_name(nspid)), - quote_identifier(get_rel_name(relid))); -} - /* * Drop table using it's Oid */ diff --git a/src/utils.c b/src/utils.c index 5f070e30..25cb987b 100644 --- a/src/utils.c +++ b/src/utils.c @@ -193,6 +193,19 @@ get_rel_name_or_relid(Oid relid) return relname; } +/* + * Return palloced fully qualified relation name as a cstring + */ +char * +get_qualified_rel_name(Oid relid) +{ + Oid nspid = get_rel_namespace(relid); + + return psprintf("%s.%s", + quote_identifier(get_namespace_name(nspid)), + quote_identifier(get_rel_name(relid))); +} + RangeVar * makeRangeVarFromRelid(Oid relid) { From 24ca95de424fac6c15a6368c77581feef66885b3 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 2017 19:27:27 +0300 Subject: [PATCH 035/528] Fix updates on foreign tables --- expected/pathman_update_node.out | 4 +- src/hooks.c | 26 ++++++++++--- src/partition_filter.c | 22 +---------- src/partition_update.c | 14 ------- src/planner_tree_modification.c | 61 ++++++++++++++++++++++++++----- tests/python/partitioning_test.py | 16 +++----- 6 files changed, 82 insertions(+), 61 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index f4312b2c..2976e767 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -18,7 +18,7 @@ NOTICE: sequence "test_range_seq" does not exist, skipping EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; QUERY PLAN ------------------------------------------------------------------- - Insert on test_range_2 + Update on test_range_2 -> Custom Scan (PrepareInsert) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 @@ -31,7 +31,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; QUERY PLAN ------------------------------------------------------------------- - Insert on test_range_2 + Update on test_range_2 -> Custom Scan (PrepareInsert) -> Custom 
Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 diff --git a/src/hooks.c b/src/hooks.c index efa7609e..7bda2f63 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,6 +36,7 @@ #include "utils/typcache.h" #include "utils/lsyscache.h" +static PlannerInfo* pathman_planner_info = NULL; /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ @@ -279,6 +280,9 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady()) return; + /* save root, we will use in plan modify stage */ + pathman_planner_info = root; + /* * Skip if it's a result relation (UPDATE | DELETE | INSERT), * or not a (partitioned) physical relation at all. @@ -524,18 +528,21 @@ pg_pathman_enable_assign_hook(bool newval, void *extra) PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { -#define ExecuteForPlanTree(planned_stmt, proc) \ +#define ExecuteForPlanTree(planned_stmt, context, proc) \ do { \ ListCell *lc; \ - proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ + proc((context), (planned_stmt)->planTree); \ foreach (lc, (planned_stmt)->subplans) \ - proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ + proc((context), (Plan *) lfirst(lc)); \ } while (0) PlannedStmt *result; uint32 query_id = parse->queryId; bool pathman_ready = IsPathmanReady(); /* in case it changes */ + /* rel_pathlist_hook will set this variable */ + pathman_planner_info = NULL; + PG_TRY(); { if (pathman_ready && pathman_hooks_enabled) @@ -555,14 +562,17 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready && pathman_hooks_enabled) { + List *update_nodes_context; + /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); + ExecuteForPlanTree(result, result->rtable, postprocess_lock_rows); /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, add_partition_filters); + ExecuteForPlanTree(result, result->rtable, add_partition_filters); /* Add PartitionUpdate node for UPDATE queries */ - ExecuteForPlanTree(result, add_partition_update_nodes); + update_nodes_context = list_make2(result->rtable, pathman_planner_info); + ExecuteForPlanTree(result, update_nodes_context, add_partition_update_nodes); /* Decrement relation tags refcount */ decr_refcount_relation_tags(); @@ -587,6 +597,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Finally return the Plan */ return result; +#undef ExecuteForPlanTree } /* @@ -867,6 +878,9 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, */ cstate->saved_junkFilter = cstate->resultRelInfo->ri_junkFilter; cstate->resultRelInfo->ri_junkFilter = NULL; + + /* hack, change UPDATE operation to INSERT */ + mt_state->operation = CMD_INSERT; } } } diff --git a/src/partition_filter.c b/src/partition_filter.c index 5def7e1f..81cdcbc4 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -320,30 +320,12 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (parts_storage->command_type == CMD_UPDATE) { - char relkind; JunkFilter *junkfilter = parts_storage->saved_rel_info->ri_junkFilter; - /* we don't need junk work in UPDATE */ + /* we don't need junk cleaning in ExecModifyTable */ child_result_rel_info->ri_junkFilter = NULL; - relkind = base_rel->rd_rel->relkind; - if (relkind == RELKIND_RELATION) - { - junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "ctid"); - if (!AttributeNumberIsValid(junkfilter->jf_junkAttNo)) - elog(ERROR, 
"could not find junk ctid column"); - } - else if (relkind == RELKIND_FOREIGN_TABLE) - { - /* - * When there is an AFTER trigger, there should be a - * wholerow attribute. - */ - junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "wholerow"); - } - else - elog(ERROR, "wrong type of relation"); - + /* instead we do junk filtering ourselves */ rri_holder->updates_junkFilter = junkfilter; } diff --git a/src/partition_update.c b/src/partition_update.c index e925ac6d..7b50c81c 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -60,17 +60,6 @@ init_partition_update_static_data(void) NULL); } -/* - * By default UPDATE queries will make ForeignUpdate nodes for foreign tables. - * This function modifies these nodes so they will work as SELECTs - */ -static void -modify_fdw_scan(ForeignScan *node) -{ - node->scan.plan.plan_node_id = CMD_SELECT; - node->operation = CMD_SELECT; -} - Plan * make_partition_update(Plan *subplan, Oid parent_relid, @@ -86,9 +75,6 @@ make_partition_update(Plan *subplan, cscan->scan.plan.plan_rows = subplan->plan_rows; cscan->scan.plan.plan_width = subplan->plan_width; - if (IsA(subplan, ForeignScan)) - modify_fdw_scan((ForeignScan *) subplan); - /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d9af9e00..bffc880c 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -19,6 +19,7 @@ #include "rewrite/rewriteManip.h" #include "access/htup_details.h" +#include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" #include "storage/lmgr.h" @@ -375,10 +376,10 @@ add_partition_filters(List *rtable, Plan *plan) /* Add PartitionUpdate nodes to the plan tree */ void -add_partition_update_nodes(List *rtable, Plan *plan) +add_partition_update_nodes(List *context, Plan *plan) { if (pg_pathman_enable_partition_update) - plan_tree_walker(plan, partition_update_visitor, rtable); + plan_tree_walker(plan, partition_update_visitor, context); } /* @@ -430,6 +431,45 @@ partition_filter_visitor(Plan *plan, void *context) } +static List * +recreate_fdw_private_list(PlannerInfo *root, List *rtable, ModifyTable *node) +{ + ListCell *lc; + int i = 0; + List *fdw_private_list = NIL; + + /* we need DELETE queries for FDW */ + node->operation = CMD_DELETE; + + foreach(lc, node->resultRelations) + { + Index rti = lfirst_int(lc); + FdwRoutine *fdwroutine; + List *fdw_private; + + RangeTblEntry *rte = rt_fetch(rti, rtable); + Assert(rte->rtekind == RTE_RELATION); + if (rte->relkind != RELKIND_FOREIGN_TABLE) + continue; + + fdwroutine = GetFdwRoutineByRelId(rte->relid); + + if (fdwroutine != NULL && + fdwroutine->PlanForeignModify != NULL) + fdw_private = fdwroutine->PlanForeignModify(root, node, rti, i); + else + fdw_private = NIL; + + fdw_private_list = lappend(fdw_private_list, fdw_private); + i++; + } + + /* restore operation */ + node->operation = CMD_UPDATE; + return fdw_private_list; +} + + /* * Add partition update to ModifyTable node's children. 
* @@ -438,11 +478,12 @@ partition_filter_visitor(Plan *plan, void *context) static void partition_update_visitor(Plan *plan, void *context) { - List *rtable = (List *) context; - ModifyTable *modify_table = (ModifyTable *) plan; - ListCell *lc1, - *lc2, - *lc3; + List *rtable = (List *) linitial((List *) context); + PlannerInfo *root = (PlannerInfo *) lsecond((List *) context); + ModifyTable *modify_table = (ModifyTable *) plan; + ListCell *lc1, + *lc2, + *lc3; /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) @@ -476,8 +517,6 @@ partition_update_visitor(Plan *plan, void *context) { List *returning_list = NIL; - modify_table->operation = CMD_INSERT; - /* Extract returning list if possible */ if (lc3) { @@ -488,6 +527,10 @@ partition_update_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, returning_list); + + /* change fdw queries to DELETE */ + modify_table->fdwPrivLists = + recreate_fdw_private_list(root, rtable, modify_table); } } } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 9bda06d9..cc67eaf0 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -557,12 +557,12 @@ def test_update_node_on_fdw_tables(self): 'postgres', 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') - #master.safe_psql('postgres', - # 'set pg_pathman.enable_partitionupdate=on') + master.safe_psql('postgres', + 'set pg_pathman.enable_partitionupdate=on') with master.connect() as con: con.begin() - #con.execute('set pg_pathman.enable_partitionupdate=on') + con.execute('set pg_pathman.enable_partitionupdate=on') con.execute('insert into abc select i from generate_series(1, 19) i') con.commit() @@ -575,13 +575,9 @@ def test_update_node_on_fdw_tables(self): # - update from foreign to local # - update from foreign to foreign - #con.execute('update abc set id=36 where id=9') - #result_relid = con.execute('select tableoid from abc where id=35')[0][0] - #self.assertEqual(result_relid, dest_relid) - - self.set_trace(con, 'pg_debug') - import ipdb; ipdb.set_trace() - pass + con.execute('update abc set id=36 where id=9') + result_relid = con.execute('select tableoid from abc where id=35')[0][0] + self.assertEqual(result_relid, dest_relid) def test_parallel_nodes(self): """Test parallel queries under partitions""" From 66655d3f39c07a43432ac99610421e2b6de3cb22 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 2017 19:35:10 +0300 Subject: [PATCH 036/528] Add more tests for update node --- tests/python/partitioning_test.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index cc67eaf0..b7091213 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -563,7 +563,7 @@ def test_update_node_on_fdw_tables(self): with master.connect() as con: con.begin() con.execute('set pg_pathman.enable_partitionupdate=on') - con.execute('insert into abc select i from generate_series(1, 19) i') + con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") con.commit() source_relid = con.execute('select tableoid from abc where id=9')[0][0] @@ -572,13 +572,21 @@ def test_update_node_on_fdw_tables(self): # cases # - update from local to foreign - # - update from foreign to local # - update from foreign to foreign + # - update from foreign to local con.execute('update abc set 
id=36 where id=9') - result_relid = con.execute('select tableoid from abc where id=35')[0][0] + result_relid = con.execute('select tableoid from abc where id=36')[0][0] self.assertEqual(result_relid, dest_relid) + con.execute('update abc set id=38 where id=36') + result_relid = con.execute('select tableoid from abc where id=38')[0][0] + self.assertEqual(result_relid, dest_relid) + + con.execute('update abc set id=9 where id=35') + result_relid = con.execute('select tableoid from abc where id=9')[0][0] + self.assertEqual(result_relid, source_relid) + def test_parallel_nodes(self): """Test parallel queries under partitions""" From be854e78e8092b9e6f3a8696899c324b7f477b8b Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 12 May 2017 13:46:25 +0300 Subject: [PATCH 037/528] auto trigger creation on create_hash_partitions() and create_range_partitions() if upper parent has trigger --- hash.sql | 13 +++++++++++++ init.sql | 5 +++-- range.sql | 22 ++++++++++++++++++++++ src/partition_creation.c | 19 ++++++++++++++----- src/pl_funcs.c | 30 +++++++++++++++++------------- 5 files changed, 69 insertions(+), 20 deletions(-) diff --git a/hash.sql b/hash.sql index c942e8c6..1349c98f 100644 --- a/hash.sql +++ b/hash.sql @@ -20,6 +20,9 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ +DECLARE + v_upper_parent REGCLASS; + BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -45,6 +48,16 @@ BEGIN partition_names, tablespaces); + /* + * If there is an upper level parent partitioned by pg_pathman and it has + * update triggers then create them too + */ + v_upper_parent = @extschema@.get_parent_of_partition(parent_relid, false); + IF NOT v_upper_parent IS NULL AND @extschema@.has_update_trigger(v_upper_parent) + THEN + PERFORM @extschema@.create_update_triggers(parent_relid); + END IF; + /* Copy data */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); diff --git a/init.sql b/init.sql index 59a563d5..82da5ca9 100644 --- a/init.sql +++ b/init.sql @@ -533,7 +533,8 @@ LANGUAGE plpgsql; * Drop triggers */ CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) + parent_relid REGCLASS, + force BOOL DEFAULT FALSE) RETURNS VOID AS 'pg_pathman', 'drop_update_triggers' LANGUAGE C STRICT; @@ -560,7 +561,7 @@ BEGIN PERFORM @extschema@.prevent_relation_modification(parent_relid); /* First, drop all triggers */ - PERFORM @extschema@.drop_triggers(parent_relid); + PERFORM @extschema@.drop_triggers(parent_relid, TRUE); SELECT count(*) FROM @extschema@.pathman_config WHERE partrel = parent_relid INTO conf_num; diff --git a/range.sql b/range.sql index 40894c7e..8e952256 100644 --- a/range.sql +++ b/range.sql @@ -103,6 +103,7 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; + v_upper_parent REGCLASS; BEGIN expression := lower(expression); @@ -171,6 +172,16 @@ BEGIN NULL); END IF; + /* + * If there is an upper level parent partitioned by pg_pathman and it has + * update triggers then create them too + */ + v_upper_parent = @extschema@.get_parent_of_partition(parent_relid, false); + IF NOT v_upper_parent IS NULL AND @extschema@.has_update_trigger(v_upper_parent) + THEN + PERFORM @extschema@.create_update_triggers(parent_relid); + END IF; + /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -202,6 +213,7 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; + 
v_upper_parent REGCLASS; BEGIN expression := lower(expression); @@ -267,6 +279,16 @@ BEGIN NULL); END IF; + /* + * If there is an upper level parent partitioned by pg_pathman and it has + * update triggers then create them too + */ + v_upper_parent = @extschema@.get_parent_of_partition(parent_relid, false); + IF NOT v_upper_parent IS NULL AND @extschema@.has_update_trigger(v_upper_parent) + THEN + PERFORM @extschema@.create_update_triggers(parent_relid); + END IF; + /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); diff --git a/src/partition_creation.c b/src/partition_creation.c index 991f852e..b445cb8a 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -25,6 +25,7 @@ #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "catalog/toasting.h" +#include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/sequence.h" #include "commands/tablecmds.h" @@ -1823,9 +1824,17 @@ void drop_single_update_trigger_internal(Oid relid, const char *trigname) { - Oid trigoid; - - trigoid = get_trigger_oid(relid, trigname, true); - if (OidIsValid(trigoid)) - RemoveTriggerById(trigoid); + DropStmt *n = makeNode(DropStmt); + const char *relname = get_qualified_rel_name(relid); + List *namelist = stringToQualifiedNameList(relname); + + namelist = lappend(namelist, makeString((char *) trigname)); + n->removeType = OBJECT_TRIGGER; + n->missing_ok = true; + n->objects = list_make1(namelist); + n->arguments = NIL; + n->behavior = DROP_RESTRICT; /* default behavior */ + n->concurrent = false; + + RemoveObjects(n); } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 80858fbb..222bc056 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1607,20 +1607,24 @@ drop_update_triggers(PG_FUNCTION_ARGS) Oid relid = PG_GETARG_OID(0), parent; PartParentSearch parent_search; + bool force = PG_GETARG_BOOL(1); - /* - * We can drop triggers only if relid is the topmost parent table (or if - * its parent doesn't have update triggers (which should never happen in - * the ideal world) - */ - parent = get_parent_of_partition(relid, &parent_search); - if (parent_search == PPS_ENTRY_PART_PARENT) - if(has_update_trigger_internal(parent)) - ereport(ERROR, - (errmsg("Parent table must not have an update trigger"), - errhint("Try to perform SELECT %s.drop_triggers('%s');", - get_namespace_name(get_pathman_schema()), - get_qualified_rel_name(parent)))); + if (!force) + { + /* + * We can drop triggers only if relid is the topmost parent table (or if + * its parent doesn't have update triggers (which should never happen in + * the ideal world) + */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search == PPS_ENTRY_PART_PARENT) + if(has_update_trigger_internal(parent)) + ereport(ERROR, + (errmsg("Parent table must not have an update trigger"), + errhint("Try to perform SELECT %s.drop_triggers('%s');", + get_namespace_name(get_pathman_schema()), + get_qualified_rel_name(parent)))); + } /* Recursively drop triggers */ drop_update_triggers_internal(relid); From 4ebe6fea23c5737faa21094e00b99b3ff8715d05 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 12 May 2017 18:24:34 +0300 Subject: [PATCH 038/528] fix drop_single_update_trigger_internal() to avoid warnings; fix replace_hash_partition() --- expected/pathman_basic.out | 2 ++ hash.sql | 6 ++++++ range.sql | 2 +- src/partition_creation.c | 25 ++++++++++++++++++++++--- src/pl_funcs.c | 2 -- 5 files changed, 31 insertions(+), 6 deletions(-) 
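A note on the partition_creation.c hunk in this patch: RemoveObjects() with
missing_ok still emits a "does not exist, skipping" NOTICE, so the function
now probes for the trigger first through get_object_address() with
missing_ok = true and returns early when the trigger is absent. The same
pattern, condensed into a self-contained sketch: it assumes the 9.6-era
object-address API this series targets, uses performDeletion() in place of
the DropStmt/RemoveObjects() route purely for brevity, and the helper name
is invented for illustration.

#include "postgres.h"

#include "access/heapam.h"
#include "catalog/dependency.h"
#include "catalog/objectaddress.h"
#include "nodes/parsenodes.h"
#include "nodes/value.h"
#include "utils/builtins.h"

#include "utils.h"				/* pg_pathman's get_qualified_rel_name() */

/* hypothetical helper, shown only to illustrate the probe-then-drop idiom */
static void
drop_trigger_if_exists(Oid relid, const char *trigname)
{
	Relation		relation = NULL;
	ObjectAddress	address;
	List		   *namelist;

	/* Build the qualified object name: schema, relation, trigger */
	namelist = stringToQualifiedNameList(get_qualified_rel_name(relid));
	namelist = lappend(namelist, makeString(pstrdup(trigname)));

	/* missing_ok = true: a missing trigger yields an invalid ObjectAddress */
	address = get_object_address(OBJECT_TRIGGER, namelist, NIL,
								 &relation, AccessExclusiveLock, true);

	if (OidIsValid(address.objectId))
		performDeletion(&address, DROP_RESTRICT, 0);

	/* Release the relcache reference, but keep the lock until commit */
	if (relation)
		heap_close(relation, NoLock);
}
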
diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 2c96c7bc..c4531395 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1303,6 +1303,7 @@ SELECT * FROM test.hash_rel WHERE id = 123; /* Test replacing hash partition */ CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); +NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_0" does not exist, skipping replace_hash_partition ------------------------ test.hash_rel_extern @@ -1338,6 +1339,7 @@ CREATE TABLE test.hash_rel_wrong( id INTEGER NOT NULL, value INTEGER); SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_1" does not exist, skipping ERROR: column "value" in child table must be marked NOT NULL EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; QUERY PLAN diff --git a/hash.sql b/hash.sql index 1349c98f..0c76f74b 100644 --- a/hash.sql +++ b/hash.sql @@ -143,6 +143,9 @@ BEGIN EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', old_partition, old_constr_name); + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + @extschema@.build_update_trigger_name(parent_relid), + old_partition); /* Attach the new one */ EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); @@ -150,6 +153,9 @@ BEGIN new_partition, @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); + IF @extschema@.has_update_trigger(parent_relid) THEN + PERFORM @extschema@.create_single_update_trigger(parent_relid, new_partition); + END IF; /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) diff --git a/range.sql b/range.sql index 8e952256..94dac368 100644 --- a/range.sql +++ b/range.sql @@ -970,7 +970,7 @@ BEGIN INTO v_init_callback; /* If update trigger is enabled then create one for this partition */ - if @extschema@.has_update_trigger(parent_relid) THEN + IF @extschema@.has_update_trigger(parent_relid) THEN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; diff --git a/src/partition_creation.c b/src/partition_creation.c index b445cb8a..4b8211f7 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1827,14 +1827,33 @@ drop_single_update_trigger_internal(Oid relid, DropStmt *n = makeNode(DropStmt); const char *relname = get_qualified_rel_name(relid); List *namelist = stringToQualifiedNameList(relname); + Relation relation = NULL; + ObjectAddress address; namelist = lappend(namelist, makeString((char *) trigname)); + + /* + * To avoid warning message about missing trigger we check it beforehand. + * and quit if it doesn't + */ + address = get_object_address(OBJECT_TRIGGER, + namelist, NIL, + &relation, + AccessExclusiveLock, + true); + if (!OidIsValid(address.objectId)) + return; + + /* Actually remove trigger */ n->removeType = OBJECT_TRIGGER; - n->missing_ok = true; n->objects = list_make1(namelist); n->arguments = NIL; n->behavior = DROP_RESTRICT; /* default behavior */ - n->concurrent = false; - + n->missing_ok = true; + n->concurrent = false; RemoveObjects(n); + + /* Release any relcache reference count, but keep lock until commit. 
*/ + if (relation) + heap_close(relation, NoLock); } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 222bc056..f2e7fb06 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1527,8 +1527,6 @@ create_update_triggers_internal(Oid relid) /* Check that table is partitioned */ prel = get_pathman_relation_info(relid); - /* TODO: check this only for topmost relid? */ - // shout_if_prel_is_invalid(relid, prel, PT_ANY); if (!prel) return; From 9d89e064409a4894cc9695524b432ff41448891e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 11:18:53 +0300 Subject: [PATCH 039/528] Turn off update node for FDW tables --- range.sql | 3 +- src/hooks.c | 15 ++++----- src/include/partition_filter.h | 2 +- src/partition_filter.c | 6 ++-- src/planner_tree_modification.c | 52 +++++++++++-------------------- tests/python/partitioning_test.py | 51 +++++++++--------------------- 6 files changed, 45 insertions(+), 84 deletions(-) diff --git a/range.sql b/range.sql index 009f11f1..3c5776d0 100644 --- a/range.sql +++ b/range.sql @@ -952,9 +952,10 @@ BEGIN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; + /* IF @extschema@.is_relation_foreign(partition_relid) THEN PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); - END IF; + END IF; */ /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, diff --git a/src/hooks.c b/src/hooks.c index 7bda2f63..92b65b92 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -528,12 +528,12 @@ pg_pathman_enable_assign_hook(bool newval, void *extra) PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { -#define ExecuteForPlanTree(planned_stmt, context, proc) \ +#define ExecuteForPlanTree(planned_stmt, proc) \ do { \ ListCell *lc; \ - proc((context), (planned_stmt)->planTree); \ + proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ foreach (lc, (planned_stmt)->subplans) \ - proc((context), (Plan *) lfirst(lc)); \ + proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ } while (0) PlannedStmt *result; @@ -562,17 +562,14 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready && pathman_hooks_enabled) { - List *update_nodes_context; - /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, result->rtable, postprocess_lock_rows); + ExecuteForPlanTree(result, postprocess_lock_rows); /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, result->rtable, add_partition_filters); + ExecuteForPlanTree(result, add_partition_filters); /* Add PartitionUpdate node for UPDATE queries */ - update_nodes_context = list_make2(result->rtable, pathman_planner_info); - ExecuteForPlanTree(result, update_nodes_context, add_partition_update_nodes); + ExecuteForPlanTree(result, add_partition_update_nodes); /* Decrement relation tags refcount */ decr_refcount_relation_tags(); diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 1519a246..ef3b8741 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,7 +40,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ - JunkFilter *updates_junkFilter; /* we keep junkfilter from scanned + JunkFilter *src_junkFilter; /* we keep junkfilter from scanned ResultRelInfo here */ } ResultRelInfoHolder; diff --git 
a/src/partition_filter.c b/src/partition_filter.c index 81cdcbc4..8da0f68d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -316,7 +316,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - rri_holder->updates_junkFilter = NULL; + rri_holder->src_junkFilter = NULL; if (parts_storage->command_type == CMD_UPDATE) { @@ -326,7 +326,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) child_result_rel_info->ri_junkFilter = NULL; /* instead we do junk filtering ourselves */ - rri_holder->updates_junkFilter = junkfilter; + rri_holder->src_junkFilter = junkfilter; } /* Generate tuple transformation map and some other stuff */ @@ -702,7 +702,7 @@ partition_filter_exec(CustomScanState *node) estate->es_result_relation_info = resultRelInfo; /* pass junkfilter to upper node */ - state->src_junkFilter = rri_holder->updates_junkFilter; + state->src_junkFilter = rri_holder->src_junkFilter; /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index bffc880c..5ca1272f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -40,6 +40,7 @@ static void partition_update_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); +static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); /* * HACK: We have to mark each Query with a unique @@ -431,42 +432,21 @@ partition_filter_visitor(Plan *plan, void *context) } -static List * -recreate_fdw_private_list(PlannerInfo *root, List *rtable, ModifyTable *node) +static bool +modifytable_contains_fdw(List *rtable, ModifyTable *node) { ListCell *lc; - int i = 0; - List *fdw_private_list = NIL; - - /* we need DELETE queries for FDW */ - node->operation = CMD_DELETE; foreach(lc, node->resultRelations) { - Index rti = lfirst_int(lc); - FdwRoutine *fdwroutine; - List *fdw_private; - - RangeTblEntry *rte = rt_fetch(rti, rtable); - Assert(rte->rtekind == RTE_RELATION); - if (rte->relkind != RELKIND_FOREIGN_TABLE) - continue; - - fdwroutine = GetFdwRoutineByRelId(rte->relid); - - if (fdwroutine != NULL && - fdwroutine->PlanForeignModify != NULL) - fdw_private = fdwroutine->PlanForeignModify(root, node, rti, i); - else - fdw_private = NIL; + Index rti = lfirst_int(lc); + RangeTblEntry *rte = rt_fetch(rti, rtable); - fdw_private_list = lappend(fdw_private_list, fdw_private); - i++; + if (rte->relkind == RELKIND_FOREIGN_TABLE) + return true; } - /* restore operation */ - node->operation = CMD_UPDATE; - return fdw_private_list; + return false; } @@ -478,8 +458,7 @@ recreate_fdw_private_list(PlannerInfo *root, List *rtable, ModifyTable *node) static void partition_update_visitor(Plan *plan, void *context) { - List *rtable = (List *) linitial((List *) context); - PlannerInfo *root = (PlannerInfo *) lsecond((List *) context); + List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; ListCell *lc1, *lc2, @@ -491,6 +470,15 @@ partition_update_visitor(Plan *plan, void *context) Assert(rtable && IsA(rtable, List)); + if (modifytable_contains_fdw(rtable, modify_table)) + { + ereport(NOTICE, + (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), + errmsg("discovered mix of local and foreign tables," + " pg_pathman's update node will not be used"))); + return; + } + lc3 = 
list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { @@ -527,10 +515,6 @@ partition_update_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, returning_list); - - /* change fdw queries to DELETE */ - modify_table->fdwPrivLists = - recreate_fdw_private_list(root, rtable, modify_table); } } } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index b7091213..0e992be5 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -470,7 +470,7 @@ def make_basic_fdw_setup(self): ) master.safe_psql( 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') + 'select attach_range_partition(\'abc\', \'ftable\', 20, 100)') return (master, fserv) @@ -538,54 +538,33 @@ def test_foreign_table(self): fserv.stop() master.stop() - @if_fdw_enabled - def test_update_node_on_fdw_tables(self): + def test_update_triggers_on_fdw_tables(self): ''' Test update node on foreign tables ''' master, fserv = self.make_basic_fdw_setup() - # create second foreign table - fserv.safe_psql('postgres', 'create table ftable2(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable2 values (35, \'foreign\')') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable2) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') - - master.safe_psql('postgres', - 'set pg_pathman.enable_partitionupdate=on') - with master.connect() as con: con.begin() - con.execute('set pg_pathman.enable_partitionupdate=on') - con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") + con.execute("select create_update_triggers('abc')") + con.execute("insert into abc select i, i from generate_series(1, 30) i") con.commit() source_relid = con.execute('select tableoid from abc where id=9')[0][0] - dest_relid = con.execute('select tableoid from abc where id=35')[0][0] + dest_relid = con.execute('select tableoid from abc where id=25')[0][0] self.assertNotEqual(source_relid, dest_relid) - # cases - # - update from local to foreign - # - update from foreign to foreign - # - update from foreign to local + self.set_trace(con, 'pg_debug') + import ipdb; ipdb.set_trace() + count1 = con.execute("select count(*) from abc")[0][0] + con.execute('update abc set id=id + 10') + count2 = con.execute("select count(*) from abc")[0][0] + self.assertEqual(count1, count2) - con.execute('update abc set id=36 where id=9') - result_relid = con.execute('select tableoid from abc where id=36')[0][0] - self.assertEqual(result_relid, dest_relid) - - con.execute('update abc set id=38 where id=36') - result_relid = con.execute('select tableoid from abc where id=38')[0][0] - self.assertEqual(result_relid, dest_relid) + fserv.cleanup() + master.cleanup() - con.execute('update abc set id=9 where id=35') - result_relid = con.execute('select tableoid from abc where id=9')[0][0] - self.assertEqual(result_relid, source_relid) + fserv.stop() + master.stop() def test_parallel_nodes(self): """Test parallel queries under partitions""" From 856c71f185e5f75bd58f76eb6671ede045333788 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 14:49:24 +0300 Subject: [PATCH 040/528] Comment tests for update triggers for FDW tables (until bug fixed in core), fix tests for update node --- tests/python/partitioning_test.py | 54 +++++++++++++++---------------- 1 file 
changed, 26 insertions(+), 28 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0e992be5..684369cc 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -538,33 +538,31 @@ def test_foreign_table(self): fserv.stop() master.stop() - def test_update_triggers_on_fdw_tables(self): - ''' Test update node on foreign tables ''' - - master, fserv = self.make_basic_fdw_setup() - - with master.connect() as con: - con.begin() - con.execute("select create_update_triggers('abc')") - con.execute("insert into abc select i, i from generate_series(1, 30) i") - con.commit() - - source_relid = con.execute('select tableoid from abc where id=9')[0][0] - dest_relid = con.execute('select tableoid from abc where id=25')[0][0] - self.assertNotEqual(source_relid, dest_relid) - - self.set_trace(con, 'pg_debug') - import ipdb; ipdb.set_trace() - count1 = con.execute("select count(*) from abc")[0][0] - con.execute('update abc set id=id + 10') - count2 = con.execute("select count(*) from abc")[0][0] - self.assertEqual(count1, count2) - - fserv.cleanup() - master.cleanup() - - fserv.stop() - master.stop() +# def test_update_triggers_on_fdw_tables(self): +# ''' Test update node on foreign tables ''' +# +# master, fserv = self.make_basic_fdw_setup() +# +# with master.connect() as con: +# con.begin() +# con.execute("select create_update_triggers('abc')") +# con.execute("insert into abc select i, i from generate_series(1, 30) i") +# con.commit() +# +# source_relid = con.execute('select tableoid from abc where id=9')[0][0] +# dest_relid = con.execute('select tableoid from abc where id=25')[0][0] +# self.assertNotEqual(source_relid, dest_relid) +# +# count1 = con.execute("select count(*) from abc")[0][0] +# con.execute('update abc set id=id + 10') +# count2 = con.execute("select count(*) from abc")[0][0] +# self.assertEqual(count1, count2) +# +# fserv.cleanup() +# master.cleanup() +# +# fserv.stop() +# master.stop() def test_parallel_nodes(self): """Test parallel queries under partitions""" @@ -1129,7 +1127,7 @@ def test_update_node_plan1(self): plan = plan[0]["Plan"] self.assertEqual(plan["Node Type"], "ModifyTable") - self.assertEqual(plan["Operation"], "Insert") + self.assertEqual(plan["Operation"], "Update") self.assertEqual(plan["Relation Name"], "test_range") self.assertEqual(len(plan["Target Tables"]), 11) From 32dff1646901eed048bcc370b63319b98e21bc80 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 15:08:32 +0300 Subject: [PATCH 041/528] Fix python tests --- tests/python/partitioning_test.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 684369cc..120b4865 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -535,9 +535,6 @@ def test_foreign_table(self): fserv.cleanup() master.cleanup() - fserv.stop() - master.stop() - # def test_update_triggers_on_fdw_tables(self): # ''' Test update node on foreign tables ''' # @@ -560,9 +557,6 @@ def test_foreign_table(self): # # fserv.cleanup() # master.cleanup() -# -# fserv.stop() -# master.stop() def test_parallel_nodes(self): """Test parallel queries under partitions""" From 338f057c039243e7c084a8d54070464eaf0b5481 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 15:49:55 +0300 Subject: [PATCH 042/528] Add more tests for update node --- expected/pathman_update_node.out | 77 +++++++++++++++++++++++++++++++- sql/pathman_update_node.sql | 20 +++++++++ 
2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 2976e767..45ca80e1 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -291,6 +291,81 @@ SELECT count(*) FROM test_update_node.test_range; 90 (1 row) +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 12 other objects +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 + test_update_node.test_range_11 | 101 +(10 rows) + +/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + /* Partition table by HASH (INT4) */ CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; @@ -345,5 +420,5 @@ SELECT count(*) FROM test_update_node.test_hash; (1 row) DROP SCHEMA test_update_node CASCADE; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 17 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index 754dffc2..f451010e 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -144,7 +144,27 @@ WHERE val = 115; UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; SELECT count(*) FROM 
test_update_node.test_range; +DROP TABLE test_update_node.test_range CASCADE; +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + +/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; /* Partition table by HASH (INT4) */ CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); From 52b2086714fb9582f60cc2fa7b3d70c51e65e3e9 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:07:13 +0300 Subject: [PATCH 043/528] Remove FDW tables support from update node functions --- hash.sql | 4 -- init.sql | 25 ------------- range.sql | 5 --- src/include/partition_update.h | 1 - src/init.c | 11 ------ src/partition_creation.c | 35 ------------------ src/partition_update.c | 67 ++++++---------------------------- src/pl_funcs.c | 66 --------------------------------- 8 files changed, 11 insertions(+), 203 deletions(-) diff --git a/hash.sql b/hash.sql index 677239b6..4c21f9df 100644 --- a/hash.sql +++ b/hash.sql @@ -138,10 +138,6 @@ BEGIN @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); - IF @extschema@.is_relation_foreign(new_partition) THEN - PERFORM @extschema@.create_single_nop_trigger(parent_relid, new_partition); - END IF; - /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) SELECT init_callback diff --git a/init.sql b/init.sql index 1ea4355b..b106f318 100644 --- a/init.sql +++ b/init.sql @@ -737,23 +737,6 @@ CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' LANGUAGE C STRICT; -/* - * Function for NOP triggers. - * NOP trigger is a trigger that we use to turn off direct modify of FDW tables - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_nop_trigger_func() -RETURNS TRIGGER AS 'pg_pathman', 'pathman_nop_trigger_func' -LANGUAGE C STRICT; - -/* - * Creates single NOP trigger. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_nop_trigger( - parent_relid REGCLASS, - partition_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'create_single_nop_trigger' -LANGUAGE C STRICT; - /* * Partitioning key */ @@ -946,11 +929,3 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' LANGUAGE C STRICT; - -/* - * Check if relation is foreign table - */ -CREATE OR REPLACE FUNCTION @extschema@.is_relation_foreign( - relid REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'is_relation_foreign' -LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index 3c5776d0..099a04bc 100644 --- a/range.sql +++ b/range.sql @@ -952,11 +952,6 @@ BEGIN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; - /* - IF @extschema@.is_relation_foreign(partition_relid) THEN - PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); - END IF; */ - /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, partition_relid, diff --git a/src/include/partition_update.h b/src/include/partition_update.h index 84668587..b82ec61a 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -29,7 +29,6 @@ typedef struct PartitionUpdateState CustomScanState css; Oid partitioned_table; - List *returning_list; ResultRelInfo *resultRelInfo; JunkFilter *saved_junkFilter; Plan *subplan; /* proxy variable to store subplan */ diff --git a/src/init.c b/src/init.c index 25ce724c..0333d263 100644 --- a/src/init.c +++ b/src/init.c @@ -583,17 +583,6 @@ build_update_trigger_name_internal(Oid relid) return psprintf("%s_upd_trig", get_rel_name(relid)); } -/* - * Generate name for NOP trigger. - * NOTE: this function does not perform sanity checks at all. - */ -char * -build_nop_trigger_name_internal(Oid relid) -{ - AssertArg(OidIsValid(relid)); - return psprintf("%s_nop_trig", get_rel_name(relid)); -} - /* * Generate name for update trigger's function. * NOTE: this function does not perform sanity checks at all. 
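The hunks that follow finish removing the NOP-trigger machinery
(create_single_nop_trigger_internal() here in partition_creation.c, and
pathman_nop_trigger_func() further down in pl_funcs.c). For context, the
whole mechanism reduced to a pass-through BEFORE UPDATE row trigger:
attaching one to a foreign table keeps the FDW off its direct-modify fast
path without changing UPDATE semantics. A minimal sketch of such a trigger
function, with an invented name but a body that mirrors the removed
pathman_nop_trigger_func():

#include "postgres.h"
#include "fmgr.h"

#include "commands/trigger.h"

PG_FUNCTION_INFO_V1(nop_trigger_sketch);

Datum
nop_trigger_sketch(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	/* Reject direct calls; this only makes sense as a trigger */
	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "this function should not be called directly");

	/* Must be fired as a row-level trigger */
	if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		elog(ERROR, "%s: must be fired for row",
			 trigdata->tg_trigger->tgname);

	/* Return the NEW tuple unchanged */
	PG_RETURN_POINTER(trigdata->tg_newtuple);
}
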
diff --git a/src/partition_creation.c b/src/partition_creation.c index 5f237575..c06bfaa6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1830,38 +1830,3 @@ has_update_trigger_internal(Oid parent_relid) trigname = build_update_trigger_name_internal(parent_relid); return has_trigger_internal(parent_relid, trigname); } - -/* Create trigger for partition that does nothing */ -void -create_single_nop_trigger_internal(Oid relid, - const char *trigname, - List *columns) -{ - CreateTrigStmt *stmt; - List *func; - - /* do nothing if relation has trigger already */ - if (has_trigger_internal(relid, trigname)) - return; - - func = list_make2(makeString(get_namespace_name(get_pathman_schema())), - makeString(CppAsString(pathman_nop_trigger_func))); - - stmt = makeNode(CreateTrigStmt); - stmt->trigname = (char *) trigname; - stmt->relation = makeRangeVarFromRelid(relid); - stmt->funcname = func; - stmt->args = NIL; - stmt->row = true; - stmt->timing = TRIGGER_TYPE_BEFORE; - stmt->events = TRIGGER_TYPE_UPDATE; - stmt->columns = columns; - stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = false; - stmt->constrrel = NULL; - - (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, - InvalidOid, InvalidOid, false); -} diff --git a/src/partition_update.c b/src/partition_update.c index 7b50c81c..99a520f5 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -28,7 +28,6 @@ CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, - HeapTuple oldtuple, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate); @@ -138,15 +137,12 @@ partition_update_exec(CustomScanState *node) if (!TupIsNull(slot)) { Datum datum; - bool isNull; char relkind; ResultRelInfo *resultRelInfo, *sourceRelInfo; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; EPQState epqstate; - HeapTupleData oldtupdata; - HeapTuple oldtuple = NULL; PartitionFilterState *child_state; JunkFilter *junkfilter; @@ -178,28 +174,9 @@ partition_update_exec(CustomScanState *node) tupleid = &tuple_ctid; } else if (relkind == RELKIND_FOREIGN_TABLE) - { - if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) - { - datum = ExecGetJunkAttribute(child_state->subplan_slot, - junkfilter->jf_junkAttNo, - &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "wholerow is NULL"); - - oldtupdata.t_data = DatumGetHeapTupleHeader(datum); - oldtupdata.t_len = - HeapTupleHeaderGetDatumLength(oldtupdata.t_data); - ItemPointerSetInvalid(&(oldtupdata.t_self)); - - /* Historically, view triggers see invalid t_tableOid. */ - oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); - oldtuple = &oldtupdata; - } - } + elog(ERROR, "update node is not supported for foreign tables"); else - elog(ERROR, "got unexpected type of relation"); + elog(ERROR, "got unexpected type of relation for update"); /* * Clean from junk attributes before INSERT, @@ -209,14 +186,9 @@ partition_update_exec(CustomScanState *node) slot = ExecFilterJunk(junkfilter, slot); } - /* - * Delete old tuple. 
We have two cases here: - * 1) local tables - tupleid points to actual tuple - * 2) foreign tables - tupleid is invalid, slot is required - */ + /* Delete old tuple */ estate->es_result_relation_info = sourceRelInfo; - ExecDeleteInternal(tupleid, oldtuple, child_state->subplan_slot, - &epqstate, estate); + ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); /* we've got the slot that can be inserted to child partition */ estate->es_result_relation_info = resultRelInfo; @@ -254,15 +226,14 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - HeapTuple oldtuple, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate) { - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; - HTSU_Result result; - HeapUpdateFailureData hufd; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + HeapUpdateFailureData hufd; /* * get information on the (current) result relation @@ -277,29 +248,13 @@ ExecDeleteInternal(ItemPointer tupleid, bool dodelete; dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, - tupleid, oldtuple); + tupleid, NULL); if (!dodelete) elog(ERROR, "the old row always should be deleted from child table"); } - if (resultRelInfo->ri_FdwRoutine) - { - TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(resultRelationDesc)); - - /* - * delete from foreign table: let the FDW do it - */ - ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); - resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, - resultRelInfo, - slot, - planSlot); - - /* we don't need slot anymore */ - ExecDropSingleTupleTableSlot(slot); - } - else if (tupleid != NULL) + if (tupleid != NULL) { /* delete the tuple */ ldelete:; @@ -358,7 +313,7 @@ ldelete:; elog(ERROR, "tupleid should be specified for deletion"); /* AFTER ROW DELETE Triggers */ - ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple); + ExecARDeleteTriggers(estate, resultRelInfo, tupleid, NULL); return NULL; } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f7c17f3d..246f11d8 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -71,12 +71,8 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( create_update_triggers ); PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); -PG_FUNCTION_INFO_V1( pathman_nop_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); -PG_FUNCTION_INFO_V1( is_relation_foreign ); - -PG_FUNCTION_INFO_V1( create_single_nop_trigger ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); @@ -1217,24 +1213,6 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) PG_RETURN_POINTER(new_tuple); } -Datum -pathman_nop_trigger_func(PG_FUNCTION_ARGS) -{ - TriggerData *trigdata = (TriggerData *) fcinfo->context; - - /* Handle user calls */ - if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "this function should not be called directly"); - - /* Handle wrong fire mode */ - if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) - elog(ERROR, "%s: must be fired for row", - trigdata->tg_trigger->tgname); - - /* Just return NEW tuple */ - PG_RETURN_POINTER(trigdata->tg_newtuple); -} - struct replace_vars_cxt { HeapTuple new_tuple; @@ -1478,50 +1456,6 @@ has_update_trigger(PG_FUNCTION_ARGS) PG_RETURN_BOOL(has_update_trigger_internal(parent_relid)); } -/* Check if relation is foreign table */ -Datum -is_relation_foreign(PG_FUNCTION_ARGS) -{ - Oid relid = 
PG_GETARG_OID(0); - Relation rel; - bool res; - - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%u\" does not exist", relid))); - - rel = heap_open(relid, NoLock); - res = (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE); - heap_close(rel, NoLock); - PG_RETURN_BOOL(res); -} - -/* Create a trigger for partition that does nothing */ -Datum -create_single_nop_trigger(PG_FUNCTION_ARGS) -{ - Oid parent = PG_GETARG_OID(0); - Oid child = PG_GETARG_OID(1); - const char *trigname; - const PartRelationInfo *prel; - List *columns; - - /* Check that table is partitioned */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_ANY); - - /* Acquire trigger and attribute names */ - trigname = build_nop_trigger_name_internal(parent); - - /* Generate list of columns used in expression */ - columns = PrelExpressionColumnNames(prel); - create_single_nop_trigger_internal(child, trigname, columns); - - PG_RETURN_VOID(); -} - - /* * ------- * DEBUG From 05515ecd8391798fe069678c85107e97ee2a1881 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:10:34 +0300 Subject: [PATCH 044/528] Clean code from NOP definitions --- src/include/init.h | 2 -- src/include/partition_creation.h | 5 ----- 2 files changed, 7 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 769bf119..bea36d67 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -199,8 +199,6 @@ char *build_sequence_name_internal(Oid relid); char *build_update_trigger_name_internal(Oid relid); char *build_update_trigger_func_name_internal(Oid relid); -char *build_nop_trigger_name_internal(Oid relid); - bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 106054c9..42454ca9 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -86,11 +86,6 @@ void create_single_update_trigger_internal(Oid partition_relid, bool has_update_trigger_internal(Oid parent); -/* NOP triggers */ -void create_single_nop_trigger_internal(Oid relid, - const char *trigname, - List *columns); - /* Partitioning callback type */ typedef enum { From ea1a91d91b3c61a3edff29e3323d65417d14064d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:13:26 +0300 Subject: [PATCH 045/528] Cleanup code --- src/hooks.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 92b65b92..5d64b073 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,8 +36,6 @@ #include "utils/typcache.h" #include "utils/lsyscache.h" -static PlannerInfo* pathman_planner_info = NULL; - /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) @@ -280,9 +278,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady()) return; - /* save root, we will use in plan modify stage */ - pathman_planner_info = root; - /* * Skip if it's a result relation (UPDATE | DELETE | INSERT), * or not a (partitioned) physical relation at all. 
@@ -540,9 +535,6 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) uint32 query_id = parse->queryId; bool pathman_ready = IsPathmanReady(); /* in case it changes */ - /* rel_pathlist_hook will set this variable */ - pathman_planner_info = NULL; - PG_TRY(); { if (pathman_ready && pathman_hooks_enabled) From 7293034e9999359c498f822720b088d1d2c95d9e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:19:18 +0300 Subject: [PATCH 046/528] Fix tests --- expected/pathman_update_node.out | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 45ca80e1..a6214a52 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -8,7 +8,6 @@ CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); CREATE INDEX val_idx ON test_update_node.test_range (val); INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); -NOTICE: sequence "test_range_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -292,7 +291,7 @@ SELECT count(*) FROM test_update_node.test_range; (1 row) DROP TABLE test_update_node.test_range CASCADE; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to 13 other objects /* recreate table and mass move */ CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; From 8b08edad0c3ac8b8ff524054b4f90a06bdf2d616 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 18 May 2017 17:08:12 +0300 Subject: [PATCH 047/528] subpartitions tests --- Makefile | 5 +- expected/pathman_expressions.out | 4 + expected/pathman_subpartitions.out | 299 +++++++++++++++++++++++++++++ sql/pathman_expressions.sql | 4 + sql/pathman_subpartitions.sql | 99 ++++++++++ 5 files changed, 409 insertions(+), 2 deletions(-) create mode 100644 expected/pathman_subpartitions.out create mode 100644 sql/pathman_subpartitions.sql diff --git a/Makefile b/Makefile index 2ebb95be..9acbd6c8 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ REGRESS = pathman_basic \ pathman_column_type \ pathman_cte \ pathman_domains \ + pathman_expressions \ pathman_foreign_keys \ pathman_inserts \ pathman_interval \ @@ -40,10 +41,10 @@ REGRESS = pathman_basic \ pathman_permissions \ pathman_rowmarks \ pathman_runtime_nodes \ + pathman_subpartitions \ pathman_update_trigger \ pathman_updates \ - pathman_utility_stmt \ - pathman_expressions + pathman_utility_stmt EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index b462bf20..e0832ff8 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -152,3 +152,7 @@ SELECT COUNT(*) FROM test.range_rel_2; 24 (1 row) +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 17 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out new file mode 100644 index 00000000..6889bb1a --- /dev/null +++ b/expected/pathman_subpartitions.out @@ -0,0 +1,299 @@ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +/* Create two level partitioning structure */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 
20) as i; +SELECT create_range_partitions('abc', 'a', 0, 100, 2); +NOTICE: sequence "abc_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | partattr | range_min | range_max +--------+-----------+----------+----------+-----------+----------- + abc | abc_1 | 2 | a | 0 | 100 + abc | abc_2 | 2 | a | 100 | 200 + abc_1 | abc_1_0 | 1 | a | | + abc_1 | abc_1_1 | 1 | a | | + abc_1 | abc_1_2 | 1 | a | | + abc_2 | abc_2_0 | 1 | b | | + abc_2 | abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM abc; + tableoid | a | b +----------+-----+----- + abc_1_0 | 21 | 21 + abc_1_0 | 61 | 61 + abc_1_1 | 41 | 41 + abc_1_2 | 1 | 1 + abc_1_2 | 81 | 81 + abc_2_0 | 101 | 101 + abc_2_0 | 141 | 141 + abc_2_1 | 121 | 121 + abc_2_1 | 161 | 161 + abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creating of new subpartition */ +SELECT append_range_partition('abc', 'abc_3'); + append_range_partition +------------------------ + abc_3 +(1 row) + +SELECT create_range_partitions('abc_3', 'b', 200, 10, 2); +NOTICE: sequence "abc_3_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; + parent | partition | parttype | partattr | range_min | range_max +--------+-----------+----------+----------+-----------+----------- + abc_3 | abc_3_1 | 2 | b | 200 | 210 + abc_3 | abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; + parent | partition | parttype | partattr | range_min | range_max +--------+-----------+----------+----------+-----------+----------- + abc_3 | abc_3_1 | 2 | b | 200 | 210 + abc_3 | abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; + tableoid | a | b +----------+-----+----- + abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; + QUERY PLAN +---------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: (a >= 210) +(4 rows) + +/* Multilevel partitioning with update triggers */ +CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +RETURNS SETOF REGCLASS AS +$$ +DECLARE + partition 
REGCLASS; + subpartition REGCLASS; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT partitions_tree(partition)) + LOOP + RETURN NEXT subpartition; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +RETURNS SETOF TEXT AS +$$ +DECLARE + def TEXT; +BEGIN + FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) + LOOP + RETURN NEXT def; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT create_update_triggers('abc_1'); /* Cannot perform on partition */ +ERROR: Parent table must have an update trigger +SELECT create_update_triggers('abc'); /* Only on parent */ + create_update_triggers +------------------------ + +(1 row) + +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; + p | get_triggers +---------+----------------------------------------------------------------------------------------------------------------------------- + abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() +(14 rows) + +SELECT append_range_partition('abc', 'abc_4'); + append_range_partition +------------------------ + abc_4 +(1 row) + +SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically + create_hash_partitions +------------------------ + 2 +(1 row) + + * be created on subpartitions */ +SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; + p | get_triggers +---------+----------------------------------------------------------------------------------------------------------------------------- + abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE 
pathman_update_trigger_func() + abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() +(4 rows) + +SELECT drop_triggers('abc_1'); /* Cannot perform on partition */ +ERROR: Parent table must not have an update trigger +SELECT drop_triggers('abc'); /* Only on parent */ + drop_triggers +--------------- + +(1 row) + +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ + p | get_triggers +---+-------------- +(0 rows) + +DROP TABLE abc CASCADE; +NOTICE: drop cascades to 13 other objects +/* Test that update trigger works correctly */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); +NOTICE: sequence "abc_1_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); +NOTICE: sequence "abc_2_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_update_triggers('abc'); + create_update_triggers +------------------------ + +(1 row) + +INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ +SELECT tableoid::regclass, * FROM abc; + tableoid | a | b +----------+----+---- + abc_1_1 | 25 | 25 +(1 row) + +UPDATE abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ + tableoid | a | b +----------+-----+---- + abc_2_1 | 125 | 25 +(1 row) + +UPDATE abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_2 */ + tableoid | a | b +----------+-----+---- + abc_2_2 | 125 | 75 +(1 row) + +UPDATE abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM abc; /* Should create partition abc_2_3 */ + tableoid | a | b +----------+-----+----- + abc_2_3 | 125 | 125 +(1 row) + +DROP TABLE abc CASCADE; +NOTICE: drop cascades to 7 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index bc24e30f..3212929a 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -51,3 +51,7 @@ UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= ' SELECT COUNT(*) FROM test.range_rel; SELECT COUNT(*) FROM test.range_rel_1; SELECT COUNT(*) FROM test.range_rel_2; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; \ No newline at end of file diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql new file mode 100644 index 00000000..4cf5d1a1 --- /dev/null +++ b/sql/pathman_subpartitions.sql @@ -0,0 +1,99 @@ +\set VERBOSITY terse + +CREATE EXTENSION pg_pathman; + +/* Create two level partitioning structure */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('abc', 'a', 0, 100, 2); +SELECT create_hash_partitions('abc_1', 'a', 3); +SELECT create_hash_partitions('abc_2', 'b', 2); +SELECT * FROM pathman_partition_list; +SELECT tableoid::regclass, *
FROM abc; + +/* Insert should result in creating of new subpartition */ +SELECT append_range_partition('abc', 'abc_3'); +SELECT create_range_partitions('abc_3', 'b', 200, 10, 2); +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; +INSERT INTO abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; +SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; + +/* Multilevel partitioning with update triggers */ +CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +RETURNS SETOF REGCLASS AS +$$ +DECLARE + partition REGCLASS; + subpartition REGCLASS; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT partitions_tree(partition)) + LOOP + RETURN NEXT subpartition; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +RETURNS SETOF TEXT AS +$$ +DECLARE + def TEXT; +BEGIN + FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) + LOOP + RETURN NEXT def; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql; + +SELECT create_update_triggers('abc_1'); /* Cannot perform on partition */ +SELECT create_update_triggers('abc'); /* Only on parent */ +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; + +SELECT append_range_partition('abc', 'abc_4'); +SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically + * be created on subpartitions */ +SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; +SELECT drop_triggers('abc_1'); /* Cannot perform on partition */ +SELECT drop_triggers('abc'); /* Only on parent */ +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ + +DROP TABLE abc CASCADE; + +/* Test that update trigger works correctly */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('abc', 'a', 0, 100, 2); +SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); +SELECT create_update_triggers('abc'); + +INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ +SELECT tableoid::regclass, * FROM abc; +UPDATE abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ +UPDATE abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_2 */ +UPDATE abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM abc; /* Should create partition abc_2_3 */ + +DROP TABLE abc CASCADE; + +DROP EXTENSION pg_pathman; \ No newline at end of file From 11e184e47b218b61aec1e68b2b6eea265ba69773 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 9 Jun 2017 18:51:45 +0300 Subject: [PATCH 048/528] bump dev version to 1.5 --- Makefile | 2 +- expected/pathman_calamity.out | 2 +- pg_pathman.control | 2 +- src/include/init.h | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 8b8fa036..4295350e 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ override PG_CPPFLAGS += -I$(CURDIR)/src/include EXTENSION = pg_pathman -EXTVERSION = 1.4 +EXTVERSION = 1.5
DATA_built = pg_pathman--$(EXTVERSION).sql diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 9ae638b5..251ec31c 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10401 + 10500 (1 row) set client_min_messages = NOTICE; diff --git a/pg_pathman.control b/pg_pathman.control index 0d6af5d3..138b26c6 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension comment = 'Partitioning tool for PostgreSQL' -default_version = '1.4' +default_version = '1.5' module_pathname = '$libdir/pg_pathman' diff --git a/src/include/init.h b/src/include/init.h index 262d48a0..3f1790ce 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -154,10 +154,10 @@ simpify_mcxt_name(MemoryContext mcxt) /* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ -#define LOWEST_COMPATIBLE_FRONT 0x010400 +#define LOWEST_COMPATIBLE_FRONT 0x010500 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010401 +#define CURRENT_LIB_VERSION 0x010500 void *pathman_cache_search_relid(HTAB *cache_table, From 07a9c885eba3962043a1b6a1b7c74d00c01ffb50 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 16 Jun 2017 19:12:45 +0300 Subject: [PATCH 049/528] Copy unlogged attribute from parent relation --- src/partition_creation.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 412b3f36..9b599af2 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -671,6 +671,8 @@ create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace) { + Relation parentrel; + /* Value to be returned */ Oid partition_relid = InvalidOid; /* safety */ @@ -680,7 +682,8 @@ create_single_partition_internal(Oid parent_relid, *parent_nsp_name; /* Elements of the "CREATE TABLE" query tree */ - RangeVar *parent_rv; + RangeVar *parent_rv, + *newrel_rv = copyObject(partition_rv); TableLikeClause like_clause; CreateStmt create_stmt; List *create_stmts; @@ -730,7 +733,10 @@ create_single_partition_internal(Oid parent_relid, /* Make up parent's RangeVar */ parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); - Assert(partition_rv); + /* Copy attributes */ + parentrel = heap_open(parent_relid, NoLock); + newrel_rv->relpersistence = parentrel->rd_rel->relpersistence; + heap_close(parentrel, NoLock); /* If no 'tablespace' is provided, get parent's tablespace */ if (!tablespace) @@ -745,7 +751,7 @@ create_single_partition_internal(Oid parent_relid, /* Initialize CreateStmt structure */ NodeSetTag(&create_stmt, T_CreateStmt); - create_stmt.relation = copyObject(partition_rv); + create_stmt.relation = newrel_rv; create_stmt.tableElts = list_make1(copyObject(&like_clause)); create_stmt.inhRelations = list_make1(copyObject(parent_rv)); create_stmt.ofTypename = NULL; From 96376f191117ef2edb26a335859b34106c37d67a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 19 Jun 2017 15:51:46 +0300 Subject: [PATCH 050/528] Copy WITH options to partitions --- expected/pathman_basic.out | 28 +++++++++++++++++++ sql/pathman_basic.sql | 13 +++++++++ src/partition_creation.c | 55 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 69c1458d..4e3f53b0 100644 --- a/expected/pathman_basic.out +++ 
b/expected/pathman_basic.out @@ -1339,6 +1339,34 @@ NOTICE: 1000 rows copied from test.num_range_rel_3 DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE UNLOGGED TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 297c4097..6a209433 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -390,6 +390,19 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; +/* Test attributes copying */ +CREATE UNLOGGED TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; +DROP TABLE test.range_rel CASCADE; + /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, diff --git a/src/partition_creation.c b/src/partition_creation.c index 9b599af2..669295c5 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -74,6 +74,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); +static void copy_relation_attributes(Oid partition_relid, Datum reloptions); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Oid text_to_regprocedure(text *proname_args); @@ -671,6 +672,7 @@ create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace) { + HeapTuple tuple = NULL; Relation parentrel; /* Value to be returned */ @@ -693,6 +695,7 @@ create_single_partition_internal(Oid parent_relid, Oid save_userid; int save_sec_context; bool need_priv_escalation = !superuser(); /* we might be a SU */ + Datum reloptions = (Datum) 0; /* Lock parent and check if it exists */ LockRelationOid(parent_relid, ShareUpdateExclusiveLock); @@ -736,6 +739,19 @@ create_single_partition_internal(Oid parent_relid, /* Copy attributes */ parentrel = heap_open(parent_relid, NoLock); newrel_rv->relpersistence = parentrel->rd_rel->relpersistence; + if (parentrel->rd_options) + { + bool isNull; + + tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for 
relation %u", parent_relid); + + reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, + &isNull); + if (isNull) + reloptions = (Datum) 0; + } heap_close(parentrel, NoLock); /* If no 'tablespace' is provided, get parent's tablespace */ @@ -787,6 +803,10 @@ create_single_partition_internal(Oid parent_relid, partition_relid = create_table_using_stmt((CreateStmt *) cur_stmt, child_relowner).objectId; + /* Copy attributes to partition */ + if (reloptions) + copy_relation_attributes(partition_relid, reloptions); + /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); @@ -823,6 +843,9 @@ create_single_partition_internal(Oid parent_relid, if (need_priv_escalation) SetUserIdAndSecContext(save_userid, save_sec_context); + if (tuple != NULL) + ReleaseSysCache(tuple); + return partition_relid; } @@ -1114,6 +1137,38 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) FunctionCallInvoke(©_fkeys_proc_fcinfo); } +/* Copy attributes to partition. Updates partition's tuple in pg_class */ +static void +copy_relation_attributes(Oid partition_relid, Datum reloptions) +{ + Relation classRel; + HeapTuple tuple, + newtuple; + Datum new_val[Natts_pg_class]; + bool new_null[Natts_pg_class], + new_repl[Natts_pg_class]; + + classRel = heap_open(RelationRelationId, RowExclusiveLock); + tuple = SearchSysCacheCopy1(RELOID, + ObjectIdGetDatum(partition_relid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for relation %u", + partition_relid); + + /* Fill in relpartbound value */ + memset(new_val, 0, sizeof(new_val)); + memset(new_null, false, sizeof(new_null)); + memset(new_repl, false, sizeof(new_repl)); + new_val[Anum_pg_class_reloptions - 1] = reloptions; + new_null[Anum_pg_class_reloptions - 1] = false; + new_repl[Anum_pg_class_reloptions - 1] = true; + newtuple = heap_modify_tuple(tuple, RelationGetDescr(classRel), + new_val, new_null, new_repl); + CatalogTupleUpdate(classRel, &newtuple->t_self, newtuple); + heap_freetuple(newtuple); + heap_close(classRel, RowExclusiveLock); +} + /* * ----------------------------- From 4d544c82714b8e0c91d30afd48543ff18dbe7f3e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 28 Jun 2017 17:57:14 +0300 Subject: [PATCH 051/528] Add docker files --- .travis.yml | 39 +++++++++++++++++++------------------- Dockerfile.tmpl | 21 ++++++++++++++++++++ src/utility_stmt_hooking.c | 6 ++++-- travis/pg-travis-test.sh | 2 +- 4 files changed, 46 insertions(+), 22 deletions(-) create mode 100644 Dockerfile.tmpl diff --git a/.travis.yml b/.travis.yml index fd0e57ed..2ea8bbdb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,28 +6,29 @@ dist: trusty language: c -compiler: - - clang - - gcc +services: + - docker -before_install: - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get -y install -qq wget ca-certificates; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-postgres.sh; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-llvm.sh; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get update -qq; fi +install: + - sed -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - docker-compose build -env: - global: - - LLVM_VER=4.0 - matrix: - - PG_VER=10 CHECK_CODE=true - - PG_VER=10 CHECK_CODE=false - - PG_VER=9.6 CHECK_CODE=true - - PG_VER=9.6 CHECK_CODE=false - - PG_VER=9.5 CHECK_CODE=true - - PG_VER=9.5 CHECK_CODE=false +script: + - docker-compose run tests -script: bash ./travis/pg-travis-test.sh +env: + - 
PG_VERSION=10 CHECK_CODE=true CC=clang + - PG_VERSION=10 CHECK_CODE=false CC=clang + - PG_VERSION=9.6 CHECK_CODE=true CC=clang + - PG_VERSION=9.6 CHECK_CODE=false CC=clang + - PG_VERSION=9.5 CHECK_CODE=true CC=clang + - PG_VERSION=9.5 CHECK_CODE=false CC=clang + - PG_VERSION=10 CHECK_CODE=true CC=gcc + - PG_VERSION=10 CHECK_CODE=false CC=gcc + - PG_VERSION=9.6 CHECK_CODE=true CC=gcc + - PG_VERSION=9.6 CHECK_CODE=false CC=gcc + - PG_VERSION=9.5 CHECK_CODE=true CC=gcc + - PG_VERSION=9.5 CHECK_CODE=false CC=gcc after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl new file mode 100644 index 00000000..9efe71df --- /dev/null +++ b/Dockerfile.tmpl @@ -0,0 +1,21 @@ +FROM postgres:${PG_VERSION}-alpine + +ENV LANG=C.UTF-8 +RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ + echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ + apk --no-cache add cmocka cppcheck cmake python3 gcc make ${CC} +RUN apk add --no-cache musl-dev cmocka-dev +RUN pip3 install testgres + +ENV PGDATA=/pg/data +RUN mkdir -p /pg/data && \ + mkdir /pg/pg_pathman && \ + chown postgres:postgres ${PGDATA} && \ + chmod a+rwx /usr/local/lib/postgresql && \ + chmod a+rwx /usr/local/share/postgresql/extension + +ADD . /pg/pg_pathman +WORKDIR /pg/pg_pathman +RUN chmod -R go+rwX /pg/pg_pathman +USER postgres +RUN PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f05aae27..e0196b45 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -31,8 +31,10 @@ #include "utils/memutils.h" #include "utils/rls.h" -#include "libpq/libpq.h" - +/* we avoid including libpq.h because it requires openssl.h */ +#include "libpq/pqcomm.h" +extern ProtocolVersion FrontendProtocol; +extern void pq_endmsgread(void); /* Determine whether we should enable COPY or not (PostgresPro has a fix) */ #if defined(WIN32) && \ diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 5c0ec44e..890897a4 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -68,7 +68,7 @@ CLUSTER_PATH=$(pwd)/test_cluster $initdb_path -D $CLUSTER_PATH -U $USER -A trust # build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" +make USE_PGXS=1 CC=${CC} PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" sudo make install USE_PGXS=1 PG_CONFIG=$config_path # check build From 0571525470a2096cf801238227f723f0539c625e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 12:19:19 +0300 Subject: [PATCH 052/528] Fix tests --- .gitignore | 1 + .travis.yml | 5 +- Dockerfile.tmpl | 19 +- docker-compose.yml | 2 + run_tests.sh | 75 ++ tests/cmocka/Makefile | 5 +- tests/cmocka/cmocka-1.1.1.tar.xz | Bin 85648 -> 0 bytes tests/cmocka/cmockery.c | 1770 ++++++++++++++++++++++++++++++ tests/cmocka/cmockery.h | 484 ++++++++ tests/cmocka/rangeset_tests.c | 18 +- travis/dep-ubuntu-llvm.sh | 4 - travis/dep-ubuntu-postgres.sh | 4 - travis/llvm-snapshot.gpg.key | 52 - travis/pg-travis-test.sh | 139 --- travis/postgresql.gpg.key | 77 -- 15 files changed, 2354 insertions(+), 301 deletions(-) create mode 100644 docker-compose.yml create mode 100755 run_tests.sh delete mode 100644 tests/cmocka/cmocka-1.1.1.tar.xz create mode 100755 tests/cmocka/cmockery.c create mode 100755
tests/cmocka/cmockery.h delete mode 100755 travis/dep-ubuntu-llvm.sh delete mode 100755 travis/dep-ubuntu-postgres.sh delete mode 100644 travis/llvm-snapshot.gpg.key delete mode 100755 travis/pg-travis-test.sh delete mode 100644 travis/postgresql.gpg.key diff --git a/.gitignore b/.gitignore index 9cf8da8f..3eb50e54 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ regression.out pg_pathman--*.sql tags cscope* +Dockerfile diff --git a/.travis.yml b/.travis.yml index 2ea8bbdb..b498e674 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ services: - docker install: - - sed -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: @@ -18,11 +18,8 @@ script: env: - PG_VERSION=10 CHECK_CODE=true CC=clang - - PG_VERSION=10 CHECK_CODE=false CC=clang - PG_VERSION=9.6 CHECK_CODE=true CC=clang - - PG_VERSION=9.6 CHECK_CODE=false CC=clang - PG_VERSION=9.5 CHECK_CODE=true CC=clang - - PG_VERSION=9.5 CHECK_CODE=false CC=clang - PG_VERSION=10 CHECK_CODE=true CC=gcc - PG_VERSION=10 CHECK_CODE=false CC=gcc - PG_VERSION=9.6 CHECK_CODE=true CC=gcc diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 9efe71df..b74538fc 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,14 +1,15 @@ FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ - echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add cmocka cppcheck cmake python3 gcc make ${CC} -RUN apk add --no-cache musl-dev cmocka-dev -RUN pip3 install testgres +ENV LANG=C.UTF-8 PGDATA=/pg/data +RUN apk --no-cache add python3 gcc make musl-dev ${CC} -ENV PGDATA=/pg/data -RUN mkdir -p /pg/data && \ +RUN if ${CHECK_CODE} -eq "true" && ${CC} -eq "gcc"; then \ + echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ + echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ + apk --no-cache add cppcheck; \ + fi && \ + pip3 install testgres && \ + mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ chown postgres:postgres ${PGDATA} && \ chmod a+rwx /usr/local/lib/postgresql && \ @@ -18,4 +19,4 @@ ADD . /pg/pg_pathman WORKDIR /pg/pg_pathman RUN chmod -R go+rwX /pg/pg_pathman USER postgres -RUN PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ENTRYPOINT PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..471ab779 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,2 @@ +tests: + build: . diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 00000000..b87c00e3 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +set -eux + +id + +# perform code analysis if necessary +if [ $CHECK_CODE = "true" ]; then + + if [ "$CC" = "clang" ]; then + scan-build --status-bugs make USE_PGXS=1 || status=$? 
+ exit $status + + elif [ "$CC" = "gcc" ]; then + cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ + --enable=warning,portability,performance \ + --suppress=redundantAssignment \ + --suppress=uselessAssignmentPtrArg \ + --suppress=incorrectStringBooleanError \ + --std=c89 src/*.c src/*.h 2> cppcheck.log + + if [ -s cppcheck.log ]; then + cat cppcheck.log + status=1 # error + fi + + exit $status + fi + + # don't forget to "make clean" + make USE_PGXS=1 clean +fi + +# initialize database +initdb + +# build pg_pathman (using CFLAGS_SL for gcov) +make USE_PGXS=1 CFLAGS_SL="$(pg_config --cflags_sl) -coverage" +make USE_PGXS=1 install + +# check build +status=$? +if [ $status -ne 0 ]; then exit $status; fi + +# add pg_pathman to shared_preload_libraries and restart cluster 'test' +echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf +echo "port = 55435" >> $PGDATA/postgresql.conf +pg_ctl start -l /tmp/postgres.log -w + +# run regression tests +PGPORT=55435 make USE_PGXS=1 installcheck || status=$? + +# show diff if it exists +if test -f regression.diffs; then cat regression.diffs; fi + +set +u + +# run python tests +make USE_PGXS=1 python_tests || status=$? +if [ $status -ne 0 ]; then exit $status; fi + +set -u + +# run mock tests (using CFLAGS_SL for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? +if [ $status -ne 0 ]; then exit $status; fi + +# remove useless gcov files +rm -f tests/cmocka/*.gcno +rm -f tests/cmocka/*.gcda + +#generate *.gcov files +gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h + +exit $status diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index d46ad869..2d4d8bff 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -3,16 +3,15 @@ TOP_SRC_DIR = ../../src CC = gcc CFLAGS += -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) -CFLAGS += -I$(CURDIR)/../../src/include +CFLAGS += -I$(CURDIR)/../../src/include -I. 
CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) -LDFLAGS = -lcmocka TEST_BIN = rangeset_tests OBJ = missing_basic.o missing_list.o missing_stringinfo.o \ - missing_bitmapset.o rangeset_tests.o \ + missing_bitmapset.o rangeset_tests.o cmockery.o \ $(TOP_SRC_DIR)/rangeset.o diff --git a/tests/cmocka/cmocka-1.1.1.tar.xz b/tests/cmocka/cmocka-1.1.1.tar.xz deleted file mode 100644 index 7b25e7ff504a2fa1b8f7bb33abdb73fdfef9f860..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 85648 [85648 bytes of base85-encoded binary delta for the deleted cmocka-1.1.1.tar.xz omitted]
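With the bundled cmocka tarball gone, the unit tests now build against the vendored cmockery.c/cmockery.h instead of linking -lcmocka (see the Makefile hunk above). For readers unfamiliar with that harness, here is a minimal sketch of a test under the standard cmockery API; it is illustrative only (the real suite is tests/cmocka/rangeset_tests.c, and test_example is a made-up name):

    #include <stdarg.h>
    #include <stddef.h>
    #include <setjmp.h>
    #include "cmockery.h"

    /* A trivial assertion in cmockery style */
    static void
    test_example(void **state)
    {
        assert_int_equal(2 + 2, 4);
    }

    int
    main(void)
    {
        const UnitTest tests[] = {
            unit_test(test_example),
        };
        return run_tests(tests);
    }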
z-4P^pxe^Cu7HdS?Zpvx;hAGxu1rh}uWL+0zZHNOQQ~XqBF3`BJgz8_Wm%A|9WNJ!= z59|XKkNYZR_`g(D(vJ*qE40!8^!?Xc<`rje;v`HpW>QRLrQxbYDtyflk zf?1R#e8sd|F4#okk>c;#j9AJ41G<|YfiSWylfC7si!^I{Z`;%-zPLy;j$g>Wr81%G z{0G5vK%}8od8<)mt9R8yx6QCw{N0uPe$QNA8+q=`Jm|Pywznb_ z34HS&W#usA@%24y;6T{o4%|QqtnB5jzky_mF8yO7JkWMS-ki&h>x~MWDHSj3mAIr~ z19>G+W@~YAGGxoyWbfI&HfZSYafj3dA#guS)l^PT%if~^kP1Km9GIALMuqy^`M1LG zD>U~$65t4@t?E}u@X6Tk_5dbU8R6Y5DW5FKKJ(NbrYNI^EAQ*y>YM=_} zI^wRS=i5K;CS=k_AHqcFRb2VN0@C8(RQskupb$NhoST{Rwv9{Xtfo4N(Xk zuE(jYEOlI=dezGA4O0{_fK@-SV*uJ&qH>eoR-yn%M7u_2KRJgBq}8E>J6jW>eBR*K(f()Z{O47oD>__g1V8j z8L@wVE~Qb}GW1KyZ`O#$GskE%{`MO5`pfL+pYeVc!mQY1K9F1NG1cL&|V zDy1xO;GyoRAap)D*#eLqbtRKYGDW)#5}p**?%f+A)1?`E-@bdRJf?>*e!G8~p~AB9 zL{|c0LAQ|Vb1R%jG_#oHcYY}zRaOfkPRvBQk)8U(Dp3l_rrnSx31vp;1;DBEP;V^w zCIR9|3=@_0M|w2I9E`X04cF>Bdc?>&MQ23|f@60D7|_oUI({01-_@Oaq%nv$sUn!! zg7>Ec@hSP`$^s7ZILCDLhC+Jv&U3OXT6?587}CUYoi(h*@{C#ie=1%wXZlB%YV|iM zaC$o}7QpYfZU%1mH$;x;NEK# z_5x<^PEpo9!I0Cjx2i|N9E}_oHT&^3uv@knX7{^;qem}o^paU@S%{)8P`xWv#x66F zS3Io?&kTHAnLeho5Ked)BbrNH(WMd(9r((%+>Vk1)o`0bPsPk3gYV+U zEK6!4u>*Z5D0YedW$s`hlrlQqgMrczn2t}tv(`pes>-k3+KNf$44{bGmpI?$Bz0%I zaD%u~Z-ZAarR!s|+K-_QYV_$$`oFDQL4%6t*@;FZHWGEQZ}AvE**Y z;R6Y^5S5k78t!Gbrnm@A%a<9&zL4zqT|l%r9if$F63=>WU=!yD61qO4`yIBMxV|8Ec()ON00(Qsz@F(p5 za#$`e>6>rBqMjo_-3nE`YRH=N|$sE}a# zxp?24QkYr5`0^fdE0Hvy@*oaI=0Gn|;J)YdrQbx3>$iZILl}5eCA4lCZ&Ggb;1e}; zRjZO2ESSfLDhDSp{*-us?9ULOmf=nvF@w8)A6Xt{=af6X-{0#Y zJGB9Jb?1v8aTGE;efjR2?GVEyTG9u@fXzHoMCDDrY|i5C=|36BJ3nGE%k%Y!>p+Bk zN|idg+}U4aiaGv=4cAcwpNN@8Qp+ABBYU=^dX~^F-rK-14crUotOlr+ zNj@$Mo%EmqP$*&tttWyp=FHq+p=9UdXU9@rT>lEVC((zW%VJ37*FWR1JkV-V?*d1l z?rmrc#645Xxc5eu(A52vb@gtwI~F@8%ZwHhpded8lBjH0=PipaCQ&5I%o$EtV245_ zJfOh(aqZ zr!nivV^&*g!aQ2u5?}adG0D4Inc$OX(xYhlAk#wukPd)gL`Wya-BS2I=Fv8LHH~*& zYXqL)knmWU-)}?mEx4i(8HME8q0gsrlNR=0I5V|nfoE|dj(d{xD z+u^n!oF2e3mwJ%iMgogGOI_#>1V&YzVhnvvgh_4QOS(=*Qr%xeQcP~G0f0l&i_)kY&L zh>!r25+=ZQJqmS%1?wtN+&xl|W=e_;lk6sz-^+$cDv=K4{I8yBvw2|aP zp`I09I|`Y{Zvb9(H-ozDCYQ(ZeR^>@%q- zLb88ax&ME~eDWfn? 
z!?!<{s<|;8d`-sYn_hQy%=n}5UbD}M&4It05^P%weeDEb)4j+R#+)3JRl?){w=Dni z3HK9&H;=gBTyJ493nji?c!6ZG^Vdms$wjij@xu&T(>$wlr~gN3{r+aCHr+B?RcDGx znczS>KYY_G(}c?H!%{C|nj6JnW^0$_1_t9h3X;Z$cXKbCGa&w8kYmAc2KIR z!}ewmK^5L5leD5G+4Tdig*lcpt2lz`kMAj!@(4Tl7y-yVC{tVLEkEy$6QAXq2Cy4M zPkB1I`A(?tx&)aWZ0^B%r~?T3%dhE#_M|_#j|S^%L4nfpuQLuJmBwlhPZ~0K94+|> zz2w+*>yxB?gBIboR0QA(U6SKY=lG4*mqm|n?t_VQN{*~S&{a_YUL=Wvo4%a2eeqzAJ(UF+laNg!jqw7iZ}y%PRt4t3f9=`(7x~;|p^t(j!$9GD zTavjBreNbzB&B3f4%3<*qyN_(ivjyk0|_7aa%+_T0CzQ=&cCZTpAl#$aI+AoQvF_{ z+TNiLo|B#u+XB1vo25;l-RA<$!9`BoVpf1$hF%=QUF8a5-1q=#Lyq5O*4fZ1GF7l@ zA6$~CrT#Xh5}`OCOn14g+M+FU-?CO*84kayn$=Pflv2w|^$_4FYh5v{0<2ke(c@+{ zAsRc{vNnKCiW^vn31}8-1@WlHF^Y~Vlzt^YrMMPfmT8ge>z0+z@VaBj3?H98>n zr?G{%l|ga5Dh^Hp7C{k&>LumG^Sr6H7z%?Z6Sx_6vq{PE7}~^;>WlKELBvKBkM#Ko ztp;HBbMU*);SI2>gF_G-dziEnaYl$uf+OQ1J~YMxZphC{y-=nK@wr@gksUmM_!~d~ z4wjcJ$r6#(5aWrN^f?QldhBZ=U9HwD#>eF;Mgp6@3utC2;MU?3v&^WON%6*PlH4w~ zTYbgxiT!jNR-}|KbH7|SF2FmMTHk6)bl16*tee}qSl(g;M1)Xav?hN7TC9nWGeke_7J>OYhIn#ijJG=XwOt zz$R=fHh;ZJ^BpL>SBVwW@^JpOY_!0qtmG~nh(nYJe?<5pfLGV?KABsI#@7qH)nr7aZ8Wi>Q%&V9pn zzN5KH>`#dbtdepQy~Y%!l%SB+qcY|&rzHCzTTQUOrE;B8RhR1#3DW3svXhzZ}+*sTzLHXU%^%b!@Cy)v7Xpf%w1^IOJ)ghJ$hHqP~|Bv)^1PZtCdke*2 z@V;&5ys}IuMfe?3e7tiz70RoT__!r8WXhLEi;LFWu)ElJPd*4VC))%DQNkjHQZgt6 z{&|MH+Q74DYLUP%vcD>ZZYE~MY{dAp^*Z*Rd5^svN$i^Rn*7p=JUH*!8o~nS?;`Yb zYj)!|)&m$j@bys!i4888h)hE!jTMV|`xolqUlnKao0>#}itv0kswfkcwHQmlICSaF z;r4%oDI-@sBVBHfJxRYuho0u`UcnV!6C`y3+Gl7ri_==m59jSg ziaV(+Dt`Dp`$ky=codn7t}&nNvK&rMw5%Dsk5O4W(8v+Ve3~r4EBfd#jAxnq7j!&p z_giCOQ{Zjnl5AwkcAM~jhTL?d_Q%EC#uwj*9pOdTYH<{_4>Na+bg5fo8ZTOjD|7>T zk3@>QKi2PpyLv+k>FdpL-(k`$cBDb>Qm&oE5SAlir z-fz?6ZThP4p$4Ee_>? zP|1IFYrEsLin8Cw#VmyhFI31>af~T-vn0%dww%XKw_3)$_JMxy)I#QAi@zF{+?S zcGwJXFto~0s^6J*Ny}|6N}f_)4OXo*3xWPcQDg6uELa7+%{>A~I0He+L;@HU2q#hw zL0m^EK>?|i1V7m~A4dg~2DD}c#2-Y_WMq7u)&%obU4oc(B^QXSL21b2W8NUti%GfJ zpa9Aj5$9XD7#^-ocvz^;)5RLr+ZvLmps)rcQ>Y^lz22r9O=7A|>lnOZcR=R7Qeow{ zq~K*M!dT2|;I8|U_O(@nW;}NH6w;&PgrWD5#gSDAUG$M;{LE_LNGEe|WQ2OWL!9N4 zJCH5Y(h)=;9o#jZG-2^GvS)uGw{1K0>M_zVy*hQpKc#)dUf#C$@j^H_Dku0 zQ5ihBGZEvFuy-P221~orzZ;?)w*Jy(c!D}S>idFimz+Hf+ zWPC;#lGA0XR-uv0N04}i4Fu@mFFp&Y@hjOo(sLp#D4s=%O)DjXjz-8QTwU&$>Yh@! zzfmf6A&W3a{vZaO7Al;`5IV>$YZ6Y}K2TEXw;XCG6?LIJG?!8CuW(IgggtY+90_w! 
zld|s4$^z(>$_F7}hjWdlMrR{w7{4xpuZ8gLvjq7wcC0kiOJqscSupS$uLAkuZufzc z&GL(8RO`|!n&T+iT^s@>ig0Hgb%=YUARhg(z|5*`jw2KTtGA}L)xQ&sGOh9<6eGNc zG~r)EEVG%`hRKku7kF5<0eM#I`KZiCpk*I43wP$A3D&Qr-tvem!Hc?h@Usf-ES>1W zSo5o(zpt@_5yzDW^GWubEx_!Sx}yYFn7MP7*l&+=vY?`f{%@6b{Dl-yKGUwETHK1* zNl4!Mc|wtCWTx76tu%G20|$%MO3Kn#p531lbyMU+MyUI?3#C**(A~RM`UM z?9;SdQAVZ4jhduVLt#;g;Ju}7kZS}OUT6F2Qu;29qm2mbHaAb&82g@dU-e%V7bhnz z7qqd>;C%g#mol}=tSO>vnu};tA*enxv*Xu`ynzytMVqPmh#dUgS2S;RgK^<+&qog87@}Dn1cRbRYBaFP8efzgZOY zCzLWoEs9hAr4can2foQ&(m+1Fn|0xI>I(J^D$1i%0&O?h_d{s%DwZy*uI>*Hr4Ar!OIm~9NXe)_Q8<20 zepK=jT*uWwcBXFj$-8(BDfyxRY}xgh5E;>11c&v!35c&um?Oz1%@*yFAJJox%qakz;%4rS0eL0-3R z5ll=UMgsuZktuLbG_&Ae1mw5>0C(3kn>dR zuons2(gTlc-&5Qw*9x_de>39e*ho9trX9=kb8cWGU9FKV=& z%EF=A;>hLz3NkW>$u64p9<#I3;wYuy-G<-4Ak8%j{i?M# z=OYxxeF7sQQehSLA83hJ?b<$yX{KH+S-1x@+=MHNz7A%&#a;&>K6R@kKeoDHo@&61 z=grEqnJF0RljKUni4(nMj2?B{SE3A66iZsPpTZ~LIAa2Zkf29f06gbPDtQJDYot_X z6Du!z(Q?G)FVmuX4-(@>w-^@7e3rm0UIyw+*w}5$<~@F!vwR_oU)-G|ddfHA>&?@` z3$bOToW|HGv^a+|Mr1-%6Nv4%>8v4ES8F_aXiFs{9i{)K#I!Sze#NPAW9fj zE0owFoMy({+06e&;E(J|%tat=vnU+x)pxW0ZR%w+-k+Yd?&Hq!F#RoE7;?k~635ip z1%9_EO~FE*dhb(V+-s=vRH@lpPCpwD@@iy-H{6eMBqNVn*K7SI^N<}l-M?43kAHPd zd)7r>Laafn95JXyX+pS))Dma3P=&YyiDDt}uvQ0LxM3iOQUXB|66crv)qdYH_Un&o zg7y}e0!zk$hVR-lW_%4H!ob>$kzUa&EHpkL6_22P73lu8T9xlR^*qo_`Fq^@vgR#^ zdyZQ{CG($b=?lYunai$nQj*c!GcERz_=TkJ#~KN??b1U6Rh18b*bfN9u_Uio6)6CB zN44msuys2Ocvyp(*kSSVi0WmlumJk8g%7C#>^%DpXG0h5iD<8sbAs7B2+ucx(!l#) z6gBl$MV|SsiI&)<&Lr<>>nu%@r}dosfzhyG*m5gz=}KF;_ZzuVE_CvAmA3OM^wr0o zQ@VZEaCwPtnyN*qfiiOnSVC;*m8W!N;gLASw45;w`4xNTUB_c$fEIY&PYSYSkPcHW zmqNEyc>pJAQCxo_k}LQ*zXylPM!IlK)CcFITa|B8C;*=hfx9*(pTliu;`>XCh2E}< zHLkW*Qs+rGFafMtlMw9>`AV6X%9B(b%)iRRo~AfCwd(c6Z9?^RD#knI3s_Znd1F+Q z7%cuKy@K;87gD;7X)&F2nw@}lBu9~fbJErT!X}$~>Q`mU93EVVHEr;7w#H;^)I}Ai zfp<+?ycrJN)K>!-%5H18L0mMN&09(&b=-153(iNy8sMO+iQs!aRPmG9t$T&Y!l^dy$PcNo=v(ysRKy5c%1|n zM-Kuf$6ySpSg?u1J`v^p#(GlfU+yyOq^|F{eh-xLx;Qp)8ztKgn6Bfa?^t62Y8qO-kd)r$$j{33S8MKp-=zE*X!@%l`+VpEIDy{g1QMuiD&j- z27EI16f_-c0HYf*(Dp$?x(+<_9um1mto@r|0Bj75*Bv3**yltyj+eEYc`D2z(IsW; zvPr5yGtOQv&$-E6KIz5KL#Wbw`&Ib(Ya(7~Q@T+fRtsVv{fdT0^Z&5kI;nmQIS+fw zTY&vslS9rB)O(TexN#ucQKGLNHX~zIS5-hVD?uW&8^5smQ64uxTEfj~^~#(4S7*xU zKF~=-x-B673v#&BzE`WtO}-%A8vD2gw=@9TD%!?lQTLTUK_3>x^ghaMX8Af+7~O%h zY{%_<8(UC_-ZF@|OqOyqr{PB&VXeLl+59jP!|3SykK`N@2aml7mQMjd9mDlns3*MLsh?-}1a^PlHV@=pdzu&&VuZ~GATVmo^ zsuc{pYNGgC99G_hIaB__-gUy#gQ7XBT9u3so>@7rd#)htsDv!D?dZ_xXgbA71KhIX zCrxfne_<({l<iKZ#-40*@Sz+3Awt83S<|7#h%%``r)%s3BgrDI@rqY=?!O6 z%*?t9DiGpdy?+M5+VRec|5tJ3{j#3PJ7q7W;8xa&l>c-#iWiC)4MiT zd;g@P4Fvxt1ZB4TKG?K?l>c=W78?TztQ8c)44m1g{Qk_%ne{ zFxYq4+uu=qF-A!}$NwFl`AdXdF^R=H3ezZ_4|+v5jC~4aXq>Lpe;w}@b4iBjuv8`4 zp#3iIWuXI9Q4Yk^tKFNtP@uH3<*pI~x`tQ8?9L66D}zYpMG=UBn-x7a1S&aYBiSP5 zNw-Veg+F~{T96ZbvOg&Kw3cC@MNd?ezhcD1J0vs2^|`bW>X9|R>!$x(Z2P|3Vcc{K z@6rMqWD(zzpjeMF9kF&{cV7#WYHz6YnHDo4Ia7vPqy?%2-bRRUCaeru74+kwde04( z@%}b7{;ARrro;9Te~g~q$gYfC#c|mJMf3{LM9V))`>?J0xO6T{snXtO3dBK z6tpp6zj%Z{MVr`X&E?o6L{P~^{Poa6)M84xQoFzNsDz~hOp5h;Po^@_#%_>+Qsf5t z%qQcKn(|!_z`~0e=@jfAR}5ufK8urr2h#3aIY|z>y-mPVSF>17)V|lgUW%O|8-32r zknIV^bxurZd3BIOd3w`Be!W~9n*d-pi1&cSS_=+v z_xuUN!_)esOSCaBB;imFczzVQcV0b^KOI{{j11Pnh_))k{p^jpxr~VSIp6S&8q~Y0 zRR^v+_@@0>>VJ7P4NBc~ng$vOTT$%lpM$}`bYjQ97;+7DX$(X>i~4oqlaT_e3SPdd z?C2Ds6AG;|s)oG&qF=m0gMI7pUvxJo3GC);GvU`@I6m2C5cN-EY%YUH?5_qFx~9mP7}dB;&7Iz0 zh#ANdl$$i{BO@**^z~wOa6J2=uyC<&{(&Mw8lxR(WgRGftdW6h489i7(QkUt>}BS! 
zgszO(iz`X@{YrHHMq$Je5tU96i`~*@avc+dg2-`fVEH?@^96y>O%p#=_Xu11W`cZD zGK`Tu7N(!GL|x4v*YU)s?M+G%l1z6|^;WR|HK`Wk9~t89Au3~;;fx1^&_Vr%;0ZD3 zjXl>Qnoe9Uiz^9|4d>m^m-(8rVP-cx)Sbb6;LEz3W*3YYI){7X5ejii;YLTG#UCy( z%Iv{&#YE1BikEG>!mujjAJ(~9mob9tcGV*tjI912df&v1xpkgm9v+qhEOfh@CF;+I zEFh84ECO3?H)?N7NqZ`TK|6AI2|j-TF{Kz4;T%l6HIG!jYCyyaPoVf@Tf1ol!e9`z ziOC7liR~T`O(w-%g`$0O*+}?N=r9&5je=OXZDc#(ad0fxY8x*Jg{jxU0p*ix&|jKh zTC3?~=PX&qp0Br;>WHTq3?bUFm+vqI1IH6|7b7D(Me2DHv1^DJWVnIoFLQ3C`xmp#yrOaV~jGtMBMFsLfr>%8Pp#cJG1u3Gb@LO;XkBoFG|W@qNoeY^hZiKB8MMs17W{F^brTQwl zz0I0@)h@U{EZo5~0}iG1FP0G^*XtSsE!zEOWHix>zP{8>-l*vK|IeOt1Iv-Hm++Zw z7_@~K#lE30U|FOLtN=HaTtQ`mdl#a0^i9|*`s_6iw8mxBBF&Kx<7}RQu0$DSAL>DJ z$SQ?N>FyE-TC@24J!f0 zaUO>+G08jbwvd}X40ll4-aN6W)RzO|%~(a1pH-vHS&_7Jgf*MqE~w8JHBh=i4425K z=dI(ZTONDq9_I5~7C&#D9y>Z=_&EtLp8RoXJh;ps4R{LCM09ep`Wz zA2?8(@A~_(lza3ekf?BIKaEzTXQ|{<#d2QxA8L8%D-(B0qC81Ha;8B5@obTm7}Uch z+jI+lRuW?nAJ}5BJ6DmQCgH<~(l4?tfB?O{<$>p-w4qxFH}E{WTb-~6fsjGnA44)i z{J=-U0)81~@!L-(X6&D!j4d{fqXHvdlP3B)S-(1tv`-HpnP4lM?aQQFU~M`KR$V~o zLJ1n`c5<&oGJx2X5q2dBbqbh)zp*6jEO=$SXcazOV>qMjaDxuL0T#&(!3mWW=v9~c z7+Id)rb25%lG2H=Mw|)a5PNojXgX(&xWw&fWY6P>k2kuGFiB+xw#wWw!cl6g!prq~ z?-ss3k%)n;P~!-!jFyqaiJ7&#^iC5kU0u-RNGYv+0SquBH{(C@jb>Yd`sbj2mehwL zk=Z#2u^D&;h{%qkFZ)SymZ3^rIz1&}5wR!19M%LC1=HW)=o!f}ttxT89zG>gP)*J9 z@I^9YmmG^2e4v$<(#tBMQ)+fzspWT!9Aq zY>r;(up8a6f^l`>19mFKL?GxVUG@=mF`22!+79LDl-Gn89pDih5SfRlKXA>T*s16$ z`tON&E#G)IsM&U)H!PU?9{n^hVvxK_z^u%qqi zA|dqbliRa9@iW2N_uA;E$OYpaNz|&t3iYg|#s@p^3e(H!N%eglo&o!8JSDZM0aOkF z>4@6I*)Lm@XN`U{@-#zzrVBYvu0@rsaa_;><#jh~v#AvmSpttysFt?IoyWY`N+jyl zOK0bhzOTcOIpE8-OJLNZqt6Bw7IpQUpwzpE5QLndGWs>!NLHbYGk$*4qG_gszJw#v z`aguWJ=ZnIoU$n1o5wOuzW>_!NjZU49CIp$s+NO#AZJnQDcPd$A6#5RKi5G zCL?nf=26XHMRxUgjvVr+sBpPky;$?nQ#Rdt*XNRnp%nQTHumQ94M`sYuMqDg`fFVl znc^eDM#fc1}dFtBER-v7&(2^ya8}_9+-?k=^ z^&?%UF0aH0dA6p3t$XMeqFrd^jOWlu4VL)DV{p!*yAX2w@qAAdZ+{hwbwmt=v4Mp7 zBIZoOn#yZjU)WDw8zpREawIcSucCjTpM^Nxx zLZ#=Mg@!h??G6N(aaO^h^)q~SmH%`tkpG^UQ_Pd-9gAu^X97C9+>rW`$ZlPD=mZk6 zT?fGe+i^2Yna}mD517pSf&3s*N|tx-ONbH}Tok;3k&={3?=$Qn&)w7?)_Q|SNv>dM z8==B*dThXzuf@OcAVF}bcYopX+@5Vk%UTyyRQ0Ht27iG5A+KML(dH8EvWA$(p-CYD zqhh&y0@D;1h0_&i&fO#Ve|{0AA9}TzF{PHoRNBCNL6$YD(&ArfIK4vAdyO-q!*L8L ztRTKUaoX15r1#9vk2M-pWigZ-n9$hNeZLo}SS!$o9JhKmi~2|4{R53+b85LQ)z?97 z{;yBRa3|a+@rV0#NRh$lrhk#7REr8bWgXL-jBBbgZ0W7S^O1jUk`g@*mYm_fR~G9$!s8 zpkqKn(^sI%D(7Cbz|Y^a#q8_BTT{AVjb;?pOnf2ZFt+e#raYwV@!`Yo{V%D)QtqT} z*f5v%-2<7gtQ_MsqI82s)keB8PSy)Sm=}IM9;qS4?)t6jKNX922STS6r($ElyLq~c z272KBi`}}c9*Wv(833?fnC%4>`&x`0fW4=4@jAV)r2_2SM(h7`v(s#8UOSb4nq1mc zKM_2%w~qz-XrDBWuTyO=r=w>ZSD>hFPN*?$0=jF1h{SE0;Fo~XS?#>*5ISfxa!_j} zUJdwD!qgPt1qY`9y@&NQDLNgNV}TwTR-R}it&{dC0{AvQ5s|r;xO+q&i_R|c6aGH> z6=RbOOY;nv-i|jKUO)?DQJ47*58}14Y5+dy*%UUlZJG@^TV?SpZv1+kvtLVih#NY;IXE}ull-(QTDi<#G5mAhE-^n4v zS9Bs-!00fH+#MmR9t+0Hoyx*+tyK5>L=9?}1oB z>8<*gYwXHJpcr(CpR@|tOP969hzQO_*oC^vto~jOaO*{jRjY1Wf6-_ONx}LqANkgz zy9RwFXA|L`%R7>L%E5fX=KdFzzxhzS^aq`O_;v_k6GcO84N|kIh?6ynx&PXaO?WMH zZvJ*UdI>Cx0#Lp7t0?yhTeY=<&=#;sZ?;VbJ0X~nf*x4;>nWrxYuqC))+$AI7cUqQ zxamDwLKa=G;W>YH2{_ElnfJby&;cSh7DTGTcXBAel~K@n)g(LFWm&dx3pzT94bz544Mza|Dd(4*x*Ju4_~bzzG0Px( zmB>=>X$1&rp0UFLpks~KhxQ&|(Wf79_AaMLKc`DI@6Eo3E-CnpQ-!2Vidl2-lcpjz zu~!!*d97I&yI`r`(}J~A6->W~dbJKM$b!;?f_N9K(BXB@({9@o{~nbX4G+0%P4LUF zO^r6&f)-6~y>n%@&3!>GnlSV!`B{7*Vbr0@gG1V&OptyfE)GE>66yH>iwvJM#6Ne&HVBii>jjibDZ>gv zh|8F+BBx=6CALU+xYC+bqT52w0chUFATL9lNbV*l)ytqj8#RMF)&7+`*7%7{HG(`8K$k&w49% za@;9&c&j#uRW6~Ja!j*!YC}3_QDctChNB*yQCvq4CAAK~5x5bhWBYh_B@X%Dv>VAN?AnA%_%K(D8ez)yycwl?_H^Em38%ZuwHTMS$1cL-TWi zzYmaNFl@H61p%}M6pJl3x<&0g?73lwA%&a?px}4mLZt*`{SOnO5&e*=F0!uK;e`)Y 
z67(8;HohT(3bvisfuYcD(?wfb5QRlu6k{V+2kFyA?_A51Fof1q9~oZ`I**r16He>> zV)H1K_cIE_7I!NrS@Gdt0ZZ-$6cYCYZGFa#!JVyxj1}hJrjn*)v@Bh9Uq0T( z^CSTvs2qeuXW=k*YsL1;dqD(gw7!Xp9+mq&uW*m9_&I|@_Ol&3-;=@3MOXgisRTr9 zy!_r%QW19%TQP~t>gU2v<9U1`y=P1w%7RtJkSF^|(%$67eSz2dIsXij37N7NedIwI z&007B=dX=A5j|Grt*S(!i;g6-$d!q*b0C^i12UlXByZ?Zt+0Dwn}r4S)Jjjnh>8_l zS%F93yx7hMK9IEoWGJx#MoF&YaXGs2fISZG0x^lvu)3w6}wA=*U_x!Yi!QW-ot0G*Ql~r8#-u@$-~fkIM_x z2B9@v9FDfXn8)@P%eW|bq+yuRF%+NwssYAAx%E+<^to)}N77jHh(@=c`cHkJ%DwTE z%qya_`FD6O3vJwRd%jp@-X)tgdhms=H{@! z!#cXfU^F9SsBi87@qONUzJOj)eQds<_2Vm#5#)T=*_gz&1@2LX4Mi|Xy>h+E`_&7S z>uEj{i(-eR8KV<`OIRvLn|pWiFdY}YL~W3bAwk>0QR0nhD-8W!W(SK+M)+PL{)ork z+tD{=-+T4ZMAuKb^ex35CG@zm)PU)vR_C^0mk}jeJI@$=4Ot5LSfvUcMWjG&hrB0Q zXR;YJeYg4sE{zq&7+~i@I<8d~?itE8k9!a!e@lEQp9?_F8p?O06mO%~Fi-4Tk+~6i z*618Di(7F!H!n-!)gzWgc?Vx7aaPbTn-&2@4X$Uloz?UG>lEj>HX?799t3TlChB^l z6roCy(KKg$f@xD+Z#x-`mu{eQg$25%Wp0$C88A;Le(o?nm+_Rvy8vLG0^IQ8Abjb$ zD`vpqx?@{rkU3A~yxQvUda6i>YE2$zD)xi7m8p8qksMuHCgBhz(VVErG(l>o>fBt> zmGS83-RH9H5>iu&v#8pIag4gexY|x}S=-ZFMX1(3;_ngcg^M{mfx`&KZ(H7AJh1Zg z{a!RB8PaOEKam*Tkthv05WI($&B!E=1H8iDXo>I6pYsj>cHXpN9GYMTIQJIRB zRGd4^yNr#?vg|vUfj^7dPC~p^0XKmp@U!9^#hBmnt*qk>yFuE==EuWm z9FkWRd+9q+F_@=aXLUM~L6eF?I{%pE(3uVqvCbZ*_xgLyEhXjR_`0@qI#1+@_V?eV ziz~1ABo29jTo`9aNZO`LQtP!Ssaw^`6UqK)Q!&@7#s5^3aYte9d2Ah`C(r}zwUNxaQHlg)_MTZ6YOIDcJTJtTSYURt2 z^4Bw(creB3mD=#XKo&{OLq6mK3z!&$=Xdw_mN)i|)aW8gvj5Gu}xRH4s<~}yR|0uZw^itt26yn#9K&*Ui zv|oekL)A{CRk!J!DJz*XmJYO+TlKcPNmiuXak2I0Hy|H;(#;)$(9a~T2>BeA;DMvs zdJ;S3+~`?jdI!$qe-JaPElwx!b-DX`qWZ^adf(H?#wOCAdjv?hvf!kSOlRaW_ErjU zprD9XDt6`GRmS*&K={evhib(OgwF+aN8-QtVbxbC=Ev5Ll4AvZlXDD`B7vAkjCRh{MaJ<^oDvM|W#2VbeKb1j>P{SwT0F+L|Bs!Y1ah)<;F{Lz(cy^odPZ0 zt%Yfcm!2u=ZvlokZtg_ae#bLyb6H|bM|t}Ayekxg3y(q8!SlBK37Kf1X)PVXKzF(8bdlIC%+bJ1iFQQyiIUywv>~uul$JKm&Z1RIe z>*NpXLupk&dEC8bSNJg|@LzEOz1f~3o&W%Es=yUvKLOTh-o7VZzXu{DWZyK^ zcmxQQpk~F?soQ+llE->|k9Lu?Vrw_awkJ2es?7AprmGD)aB2_v z_!>q*>xO%wQrpf-45?>xTG>#Y^i4|W0B3@tmH`yFn4UE0Qyv9WtUY*VH~&%Y*N@CB z)v5N=!y_V|T42n`K~PYb+vrM3d-(FlZDv}xJh1{lE!u}1Nem@_(2SW4Zgywz==Lfq*2+YWL9XbV{!uh;w7eaMMlF=1ym}1A2U5Rrg~WNq`}slW+MJJ|H6(A<4s)o%En)f?6_6{OzII2X^mSluSv)?9$krctpW`)72vM9 zqY{BV&>Y86-|--hkq^$wbVJNIZl`}zU$tL%lVR=B2ymA8JSr8&MZQ3u2asq2PhxMx zOZ7cp4T}=n+Lu?b?B`bH(WDuema|LB z8u?_?Xkdx!bZYYxPP58V4wF2*IAA`-WsvZQfqxnT(VGlrSuWDX<4Q_LkD&PwKu$z0 zE%e>Pgt(rFtd`j*tPutn0dS6K)@|I9WAqLGap`eAjt|uId&^=CJuW41Q;T*%VN9-7 z=U!+?NI;q3mHjCX799-y8ac8iLQ5(Nl1X^$RHb`Th?5N*xszC}J(NOlkVAf#^`MBa zSG)l$lG4n|1HaKFZ^-_?|JRoZ;XSX`HdEGamT^Mdte#ii_k`Y~A|r58hT zX+jPNmLIhUx9wfGhau^p(5i)$Wq3ueIQgQS%rcN>{R@`7tq<)_Gu_M49Nyx~2_{y#Hz`)483R*7^tfd`!`|PqbA{h+ zAa9y|X_(NT1}aqA0*`=HKP{hq5Q=5V zxl@J;a)Bn8;TxX|i<%Laj=7SHO4a{wl4|;CMU;GJs<*sZ>B??3V~?*Q=c4%`Bccy& zSLE=AJK*`#7kgB!qdavNx1JR|uk@KG6J)lDtP#U1f_+@>Hvr(+`1Boswx-QJ41#Bu zM(sj0Gy6Ge-Bt(WYgm&F#v=QDF{h&~Ek@wh<}Xhi3N0e(c_ZSV$o&R;s3v%;`gQt8 z7uinI0DY$sFU>8d(}9xdFw~Xo?Tzs4Tp2CtW@A1KJJf?p$d4TW+@}1>!tPkpeIJlK z)Lh193AKW``$0uK3L4=6j$LNGvag*t$E@joX~xB53g4MVDcixB-nn6~QUAmM!P*mM z*pdJdWdl3ChN;=d=hvfYo&ex~q6{3QrZ;%zeSSxBlW+)|Zub+|PPrLH**6^vHLOo~ z>G{|I&jWBhvs@fnA<>x!q&Wej|Fj0DUD|O%$m5i@MXyiZkbQtDTPzbCry>y=vaI+^ z+zd}O!@O8Ld}ecLONPLOKBl_z6uO$0;I{E!Q<0;SQic}lzQ`J%59P5N0BWi5B4?7) z32!`)o$Zx@x-cntmYk0(!pW>;Ol9PNf*NAH_XCU6o0~A<6CmmReDy|J{0~26@xZcZ z9+NH>+4wXdOV6y7_V1m7pZX`)xz8Di_7_H5pQQps#5TGAHKbfE4WD&KBLAa7Ar$#caV5 z!I#2Ztn&HYDT}0)Dw+DV(UZGlvuHhVdFY}es4q+TU;CS+{Uu|d^fXHpm}De#(z=hc z{!XYWywV71t)82F+#k|fi(XqNfY1C}hmW^b5thqX7mpD~itLj`I%-S`iHN|E2biqI^ zAst>=cvvvPM*T=yl-dNE=mvnY@=aq`F#>>71&})W%1#Dl;9LSVytlY-4OQ28ZFsUS zkkS0U1P*cNgZbxGKqZS-MXs;&mX;(s8aM|z#H}s|Y0RzJwE5WLYztb)-KFujUmBXL 
zEk*iYoQH9l#FL)3)0bcw{)WM#qQ2Livd?yF3$-1_5kgFIZ@TJ#YbIu#5U|c$6>*+k zP07Tn)X0$Y?xZ4r5O)?q-s~M=;Sg=-_f6sU=p<2K5lUV!yJrD|#_g_ErggG&un1Dd zAQBtiRA!@9ZPbAhRv@hs{Eh$Q^S14ltv;kc#NO)nvO9QLHrwv^ww@BwTXLLo2xFiY z$n5g4XG6jrhI}$JjE2kdl3zf#+8L(-kH(W*c*0uVHRx>`!(@iV0 zwB}jqg#EEV>9%p{M58J)Kg$a-)FFbcU^nkL}T``dk-W9Q_Xv7e-2~GCzC=pxKL8lkbw*2pqkn z!frJ9Q{>e;qFMbfdmj)@JH2w;Fx0J>sM)*WagE4RvZ-NTFq3 z!aljY*gUo5qEbz}E|^MgFWe1!TSm~SQwRWRlQP8_QtWQsPBNO$fw9%6(LAZke%Qs8 zy@RUF!1qhjl#yD}Dh}mQ2`ldtvM3gH1LK%9;&V)?Y5TfhXMViH4gcO@TtPo`Pa@lH z>7U4u24-tWOO2(HlJg=PJAe6{b-!Uiqbe6&^61PjhchQk2~zTc_53Jl&dFl@ukC?> zz6w#6A<3L-$-ycyI10d%i`JwYPQL!t(|~%PWp2m0lS@X`K-j(^_}gAv4Ko>~pMK%apM`-#om? z?BNPPAch zp#B)@0<1WOu9OnFHBT0A{iSdb7NF{Ci$oaDDUACwrlS;>{$nAb!SGMUmN2uhNs$)L zc20TNaba8VdE3=~vEfx*d`*t~rW=E`@vZ4Qz11$+lj}=!kv_dXvA_A;dN= z8_ZH-=L@y1#Jl%PC%D7=i^=7%%`erKaQp;RiWIj=+c?o$=~$$8_ww_9;hGtV$RI*w zWi^-AW{f>?~GuN z=p;DCQATaVx#Ni4$b(6xxip!k09K20$i3EJkV8fApTwON%!L}(FN7^ITl5iSixY(a zm&~-}@QyRyKkUKI94_5Uj%6gA2gbI#6WAP3qd)3gT(p6~+u?zA9#_%Bp7f}e1fd0t zx2td5gR}YUncz2*Rx@_vMYGN)p)*rtT;>Hlf9oeU216((%^P%A&gUYs<9Jg1&UI#e zVI&Ndsp%N4pg&oe-DpY1smbZ}lC+>9(rK5GT#tgc?_$scj6%$Ij~&EU8r8gvLrte^ zf2P8|y8t3f9e-KSH7zkn8Y}%q|IuwC){Wli5q$+wf`{vGV12CxfUwRJIsd7+9@RVe zDj8EvI8*XRKwG?3C}JITvf|0M76AUky^_-z3Tgln)!8ch@C$)H6#QRWqmHGR)_xXJ z`6lE(iI+n@$;N{LOvd>?&fxUbkU^;^6W3tY`C`(iU-CUF@kTYq%BHntV?OP|9}q8!-KQi^l)2w{_z)FaQZpMd|2klk5MpsA0*tZ72N81?+OK@JSi?w& zlLUnyT!QeskOx@Ifx-xEoPD=w^)JOp%Z9l31$Xyj9sppaf8o#GvWT^!EyAX2k(?WU z@k{tt7)09qwfD$Ks3LX$j;x+0RVOc1l7r+IGnDwrEY`5)moAqrF{=L}!U@&F*=|yR z8ApjK9fe_$r3Jw}Ma(B3iK zlX4Bf(6=mo;0xLWUaJ}A-_NV&;)XD;9xOVD9uc7i@3nv|IM5MD-{qF8gvEj1rei9@ z5y1C?j_N0}hVdFKny!vddgGBb$TRH-6B$)zGAK}=*Fyh$COYVpZ=*nFjc(?i1KlX; z_G6#wyCk?THf{*)cDkC4Rf0C7G%YYpP+uT`$Lx8SfreCd z-Ui5{$xR;m&*F2t;AdB5s>});s`c-fVQc9TfhXkk!YvdECZ<-9k}o^&_9$uxatieR z)za8Mb1;wX5N2rLTmQe%-usf$lvf%wK=+r#)1tDfaTuk#sYw@-TJJ{K!_4j}?3Kc4 zP)eiCyVpKNg0^#^Qgw0FOjkhj_A~5%O4a&$k3~gZiUK@KNq7rM8ZGS1)vm}amiGt} z*NQL$E|hz!g0D6@WOI&_BQ7vUEbk8auggn}xQ!xBAz;wd$J}>o?b~WPQUmg~w zZJ!Lho|N0pZcrEf4U{S*-^24_Lt#@=F9m}9xs!>!KVGv2Faynq9w)(BU8xx0(j?Jh zQ%I=Pw8wvE<#5N?yH!fYA!MwP^ltcJpNp^C z+JnIl=7gM3)Yd3H2I+h;(lSf=sYDF7#0^pmKJ(ji^8!_uls4`+r>n-eYOIPh-03mU`Qud*y>=CotW|fgDhpmy4pFFhjre7&(rIn z=y4l-V!t6z;|J{PEa3f}DN#FJ)tf%M8D6^cc7x;NVj0NoQ81?f=K^ReWIQ{2KVoe> zT^oLDu-CY)5c}6A^3|1}nLb5;c)9~}*?d;_W?Zh5_PR8V9!3#?WG_3IBZ7QB;Y#(wbm;*t7;5TwH z@q(yXMRE$~3F4nCaLMD6t28X@(rPM6bjQun(|5@F&0ZJUVWu9EL%}P<*sokHjtska zvK4ZW>qafR1?22!t181Nelh5eI_$9DRwpXbIvKbKoG>L-XnS)UY z&9^CrioUwQ-^sBRK+=EX5~ZYJhCqZ2TCkUkGCbfH_;@s39DtdQ>DDTPuRA+(-@=;5 z&W@YykD|*VWAI>mpO>;q(pb6JhI?IH6JQ#&v^A&^U- z{m7hLh;w%M{qBg3%?UXOzp10VjHE5u48HkD#c8C5Xy~uTsT0bI=Zm%hE{WR-ZPKV3 z8G~Bgn3BE_ovw|}Em6cwt}w?f4tQxgA+d*nCrR{pKwWHrfTc#)8L_^_7~BdO#-Mc~ z>y;{8gQ|aR=9A|Y1V4@gVF?`+ZR6xpu!nL03AO<3y+2kj)qtUFs#G}=27+3-1>t%S zxIdm5o@B}ID~JG#18%zw?AJgBEC79^lPaMdOl#|ps?PYeh#4~FSegVI@^;(OOCWje zI{Hy(;tFOw=UqIE9?_*FbKJi48ZIZrcfK8g5t)Y(mwWVbnAbRykkBB{0N6aNG0;QP z#VdEMRGYT(jiJ1Rg~*aWp`Kc$$3_TEo0bkYpfk0cR;u`_ieuQeR|ST^KNk{|^(f@* zYj$6!zdPzE&7EtT3D)5vO)kCpcdK00r$GOPiNUkI3&|iMHu&5@|b)V~rZ?*Zbvu9^3ps`~bG16bouY0|Pk}bu}C@_{L{&DXbPJAria5+zFP3xI!B*o0VltHmW{CZMyV| zxN=>B_pFTQ_g8|Jo|}=hyu#mG?Qt12eJ7DzjOr>{?CVEsmX|3tV1@koTFEj`z&Kz3|TVG4oe1 z_9^4Bz@qf+jzl2p?KBI69kK0qq;5vyE0o`<8HrEs6+BeZ+)-EuU}h)Rl=-Wk zwzh1BX*tQ&{|NDN6siG@-2KT%MQ7#^(-SA#CaOGhqAR@7hh+w7Ev9k!v&5-+lm!Op zSksA7M3${1N|P+S?bAcxq-}iRErD*KbKub9?LJxB{$e-L;ZYmBTbR^?4K&f;22e*- z7tDhg`XKl3rAy(b>!T@#-*?Htk9f6>Ac{qh88Rf($b?XE?4B`>3FffRWe!lg4 z|Bh#-FTF?JTvL%_;x8Nc=Oghg*2XYgl?BDy5g$!s*81jR 
zByYGlBawwx_?HyGhImEjHJ|tJ1Oy?T^#ff4sszT{AG&p$$ft2?G1^7Mo3K1~(#4-N zmBVj3n%;CtMTneRqLTS;`GSj$p3?_wVc7t{Z(VI8FD0>E5F#8?5SFi$WDIKDcGm8cHCNU3!~lWma1vb#BGg3C z^4Cs#x^lHNIm*U zXeg$bpgzD9-_!4Q{ozaNca%S~4rQQK_LGp1 zRG1u6xk6-GmuiQa78vcBrYwkoRQ?0@VxNCwY;x#;R6Z*UBq7}D{ zPqLdZt{l2VtKbmSl;sonEwLY;PW7vGMS_+f#uRleOM#Ik2_NJtup&nP@g;_Kr5ea2 zygoSPbhFTe>D^s~wQ>i4k*+^a%dgU-THOl}li1}RKw5ApkLG3QU+=l4DjbvjqSq+D zo~u={#K_sWeU2vT`0!7~!+Z2qPSa-$ER-8$Uo!pn?_QFmRh+@hsf)wXqZ{w8I-`Tx zh;$8M=IuN|2&L-9D1*UJ?H`OYuVXm)pEFsBz%0R#NNwyIPVAhgu|aChbl zT^ElRJ>t$5gY+%A$L4)@R|^F#7(kJS+UkOk-tKvDcf?1N7%q#Bkr*J$ev-EIu$$9hTy){s+m+mp^^Fr)5;7#hJN&AQ2%}UhZwXS+SAWL@=w7 z*cPX+v zujrJ)w!`zXS8<-(nLGimfjx?V(D2ZcV_gGo$}jNAEBa`I7MA3~dG!FDh#(EjV@| z{vNX&0)HoQ7g%R@O(-;VboqTSER+am-7-k91bPEQ=nZs$4-Eg3J0#ObFbh7fPT{dx zZs!wul=Ww#uD`ZHhb#h^><4*q?Hkvn@<__y=8_2Pb9?)6NK)xwE|`h3BW~CBW>6>G znAIm#M43p;d>n<2TeMmKU2bd$LX?Kg{}U(3?bXz?R#jlhP8Yin&m-EDgPyS{q2!Dq zlY4+$Z{gdMzy~NBP`xw76%wR}WKK&~uyE}TZ1>vya}A{+I&Fyz5r^h6nXp%>&WlCK z=N+jwm@yvPC<7nbMV`3WV_?tj7MxDru<%;0`x&6JgzV+hB7nU`-fxAVk9-@)OogyZ z%f3{gC#a?CGRo~UF4xcx)fb&BU116UeQw;&Lu|ufE3?oPQ*AmZXA%R3C?#Pl%f7^? zocXw8$e$Ji{$2RT9jjQj7@(6 zE0f^{sO#Jz9=^f;?PIY!7(A}d{Z0z|S=Xrz)1|pZhbegvm-|zvfHboTb>8kX_u-3- zN+*H!jBPM^-4nLCOwI?0$qG*h2o75xml<>$K-7m$JvfAfpjd^5xSSj+4w$3~U@|C- zX1CF(l`Ws&iZAlG?l9y`=sL7RZF!{j14+DY#P618syjg1+gwz9^kICdp9cITN;h2r zG;1tZ#?tAmn`>76rjL+0TFA&h(KDo4jry;7c@crK8~q3Tr!OE3)M+d*9V2UsYYhZ7 zMOI;gSr_C>U;m}F^6wd!RlZzC@bVovU>bJC#E{1$=WGBmxwJTh@O0BSkoOo2Egh24 z9ZQhKZ&rLK%`H;NQ&}SZacaE=?1u4rs>=w(lq-_QoO1NuY_B;6=Q&U)3%uN9{WACD z24EK=>t(>fVaUp@N%xEBjStR>*RO=tgo+tIsU@Aa=PH9k#1^Fz)_XvexxQVLn<|<~ znFwMZ^%B;A$J=MuH#lz$fxwW+Tx*%`vP$pz><0zE^zf_PZsFa`$QUA_`8r$>=tffi zQ@EbVQvTKCY=vj(GKt@AEHNhz@3oe7wEdrgn;tdBYF)qlpNSV!ggGi2Mg?=m-^;(7 zsfB1^3GtCL=$U_EaFp93&4X7m%Ye284=g|#JQGPlZpkwv@^}*l$5y-U<9k4|DAYsg z&KY#HIB%xhtM?iZ7GaK(Os6q}Te{=x3;Qk!+F4Q@LHuf58FY)*(L9Q`uCyu*WN|6zh^Bw(n|bV~rq?w~EARx66xK3amG2kP}TtN`bQ1+E~ueohtbqn*RpX=T9t7C}NyVpm#Q z({9Nary-u)p*eC_TntP-TT9RNus-|GsZGizq#$zobx$7Y=M~yBj>gi>;Q+&o*+T91De5^FLM>x zW_{Xs>s7|`K-&x_wC{NT-i7HA>OzAsLhZ?tm?XyP@*%l4WwoL>%%ngWsn{&D^kMJ0 z3{37jp7Wd?ru{at?b9YvgTk%?F1<-w{*1LpiAVZ3SB4ODAWsZga$_U$yureHpG`|V z7rO{AA@B-Sf>~6Jp1gy4$!v4OcTTY3To84O8vUPN*Q6EId!ij0VxaHs1P5b~W1&i= zWWOI~xUU;Q)8nBAC%wv@xWXH1ZTrb0gkCA7LHtrPU|~VaUHa|f`zcBa=J)|gZ(M!K zkW6@hIE7|1f|1FZ&zn=A~L=sfCvYZNGH0(^wKFU^N2b47_in@^EhM0$*31ZUp<6cCukjP&8)7ex`*F9(!#1zdqG1{aSN4kX( z$a(CMDZ2rZGVi$w%OQwhdFqfE9#=4m&IRU?L$#w>o?R06)^S0a@aprfO7JS3+d)uhsOM$C>#p!7B>L+nj)D{;um{p36J zM>+{`ZYJ&QLe(C6-^+1K`@&ndC4RpaCOikXW;Q3XZ^zDzLIF7+r4wK<;!2`5PW8FU zJWs9Og=@GX5pk7HRt2CYcKyQsNAY?kFxBSw4@pyn^=e>%3>Ow(8zcb-QU4O)f$M(F zi!AA-Ce#vPjFQ%VaI-K~es?9Y18`Oa6G+06L(&DLd}V(+`rH>R6OO6;?a#<@k$X!z zoh*&0im)U8Lt_k;DL?#0Hrf_!X5#jCXwMFjT)!1DnqiIgbeX8)f5vqGBMHl+QG2=G z>pA~zclt52U~$FvW2qQ`lynBVef8uA_700!&rU{p?yuU_h+2}&myky(Wn8)|F?qiu zO+zdlD2qtbvMzVtOgYc!o3{vt6^#!I;RDWBYo?GS=)ydvF!`k(|LScOJu4mEv@|{+ za_mMFQ_>*@ui4$kQfHT7xS&Lw>BH*xIybtRVOXH%e=1m6;WUVnvxvaBMz?6-@f9Cl z+$hskwSx;$%J28vG-dxbRpn$`G8+$Xs6He7YsY8Y%sr$b$8HmNNY1A6GS6fh+MX#p zR`URB>TRCULmg0ao(inOb3D1>{oJDZ&p7f;1DYp)E;rcN=Q2awo&D5t2@(uj@G<d$+K*2TWMuxg3zsNQ_^D+w z>S9~d8n>JH!CmONjvZTAx?650S~3R-#Ak$oWMBrfJYR zUvl>P2Ye={xWAR;qpYX6boCc$S(D$2YFfERZYbY(QE2R9-rI2_|Fk(Y$#GT^j}fRX z#4Q_9B#6eM26ZiIK&a%N4#@v{!9l$!(>xQ)SpPr6tP3HdB5W%5u&tg(sz`N>)s6zF z81;7^HFB>3G(}8Ch>GCcjD@6nt0)I?hJa%Y(QA&3zHj>?7AlQ{tZo0yC$#%w`c~Xx z4w=FFrod_fyq2KF^xrrG~6yyVR<-AWp&I z3(q5`Q9CGM`OVdjnPgk%wxy!iBg+2CDqrcI0Vi-H1uMHke28X0) zk85&6pD~muns=hDq+_ug!6Pq<_ToRFs~hJ&Rst2@{GZm>0b*Bg9^Mv58QbV)jP4w# 
zb^V)R*M!?zpOtEq4S?Z~H12seKd@{nd1X)ES#S>LrVs-d&d-IU`Kli*C0SQB9$Lyn zBDWP-1PTj(|8-yWt;PnVjT)*DxN>zvMD5u2R}SMo+r8M;vfg6Q{Ch=at}OuV&n&9C zhT;?Sru}-e^QoXh;m07OVoBkXjID8!(UQW@#k#r+T1J%CdgHQ&!FXR9awjp2qjU&; zEKCN=(vX|*P}3WeP|f=2?%zvuS!{Ut`u;OeR31AezrvmFgWluCneV}xovGuRa2o`3 zX42wvX+t0zh5^I=>+b1~;!=AWRJHJ(+I22}YTNN;)ap&Y)Jf%CBu?T!{f1(o?(k@* zC}y0FQxdWKepM~NQ@Vc|?joxlxjI{BTd_DDB9u!Yn-`%>e$GR=S_g83c}y(}ng`2Z^Y+@V03A z*YK}yPN!Ab`%i@{sC7>#?D-S;@*X7PcKZf=SE%ya#fsqkrKFYbiJKj7+1YFoNcZ(i zR*yUwRArluN~Ek&w?|0E1_Y<{O7ND4PU8_zu#h!99DVk8*57}PdBw5*6J5~q=#dF~ zKPgh%fs}N~F)KdknOFAGrsu6^tLq0va)p;?Lk-_LIOoeAXKBOw>sRJ2HNry9d7fJ# z0x9VDlv;8jek6irgPy^qK=?G$E!zt-#F3s#ts}Van{Fcv=Mmr9>jcs5T>pvHqa`Q< zvPtqb%h2!mjiG0ow7t|>wIWC6B*u+h45hVToR1MrmPX~!9MMXt%{pV29M3y3;a^~E zQgMzJf-3OR8Q+7I6;RSmJ2^_jY>s*(`ao?Jt z?&?&yb6M|Nqu6`iXQqT>{G|(K(2PqMf^D6h>;e|Z_B)8jI#2*h#uE7|yKW$<=hFn1 z={g&2f`#baQt$0qlfCh{2O-e?)7~;_;(_%tBc81jMGiGmJKw1NNJj^ZyYSouQ%@Sn zKXyF~k_Y`AU^HLtYn~2r5Zbl>QT5s-g zM|w@+nb;puv-zSr5Tn5qZf2V1CxLte^S1)DCUbWi=3WRQ){&x2qBWtuo3<)`A_kXI ze-&;N-h-Mjm~(CLqWVao1R~d^+SFv89jp^8V|xzA7x*g5b7o>yB zoG78j_m(TqH2BsjYjQhgQ|25;AE~&zSisP9`+hyn>e^x%iD9*zu|*;JB`5c-Bak^@ z-w;$~z6GX0A!X05Yc;2u0BmL(ys0h_qi0XkR0i!q^W-1~pg=b-h98`js@$KibSvek z@}IMkKUYfZQJs$Cu+cJH z8Zr{gX;=^4h!Ghx-SDFw-IrY-+)Wgxchh^p_^7xet#-L!CjW}5CYjlyM^`R${;3kl zH%-Kuy#^Npi%7&IP)rE$XGJkftqm!WI=cBR=Zb{##Q{qw5qrg=(S0%iNoLX{5E`cO z_@x%abm9#1L-x|vS-Ueczw^TbZzDr!50OJ~eTSC)6I6nQuq?1Po12HUF@$9z5SmX! zFy%5u)5&%)287+;4T*39`@Sx)=82V_KH6i*6&Jb*Mx%8g(itf5v*|k&(?JjnQp{OY zOYA-34!JPHX)IV+ET1j21(cK2J#wk@`sy(F+0~swTC^^rYxb{F&!A*Ue;fAry>}%2 z$fk-HJ2Kh8(QP#ka^y9lOfx>w2o+nniWZHI$Hgkk2+Ur*%v=7aZXz#mtFF6!};n`Zuj*1Fo(--Vu z-#n32dgiNCiYg+%E_5T=UYv+^o_<>gZN!$n37x`yHLk7(2ccU`X6t7M!+XtP4*fs4 z(u?rdq8|ZpJ$xH+ED68sVdGo1!wS?GMm<+WTIx%IFhf!5_T3M8F4wmYGAfYvvexRP zV1#GL&}G-qI3X{vKcTo?WPIWIDc!r$r`chGm=GKfL|nv3osN>Z3rG<^Bwk&j2`-Lm zIg|sD*d5MsLBS+U78ty+`}9cw_8v7IO>DBtQcGWBgq;1721J8TlbtA^(w}_*WTb5N zXJR2m>1M&`JDF+iSGgkZdeUj>C6tH^Imxg(3((Q~azek1RC$3fq~dsV#4gK|%_(H% z9@|+-&C`fSmg6~YfqU9%)#p#A^{8D~gdHLD6Nsrs5>*z3?N~6R*l9Hz^WL4RCYf9n zeauoiQ49)zuk0ZGHmBbmLmWf#El(6S;NjwXRRg@~z#r_n@!Qwi<4B(1RZPW)l<;^f z$%XPR>xS(Xv?JbStLT`5zLz)P(kWh&cjyc+ZS?=HqPJ{M%sKwr#4Z`V~Ei{@L($ zNZlql#>z;-z%TJ~lxM#xjXw8Q*tdO=dOK_X3v&L0BBCKMj#hrVO@L@CQn|JX(3uOz zN5a&Jy6LrvdsF5MVanlkUqL8*uP5Z|8lQ#7hFQBT=Wue4KN|uva|{(IU`4NKLr~lR zn+E8}?zhsmSZWgJ8*Wf4R{l3BA_el>8bBgc4_+obFaS}SILmmR8`$ASj)H_0i{e7i z%Jc~s#T5Eg6TGr$pbOwg=q-o$MC+-qwq4oPVzoKM1L|meE^i-p8ZxJH5H!Q@Bf<)3ivc z=Aw^gN7^Rk2;(+GiZ6sqEwhsKvSLgmE}^X{Fp>DekyQ^!P` z6-nkG5$>4!d}VIT(4~#jeT!>tdhL)0%g&=FUKB(BlFqYB=x{K=G^S}j61MLMv)c;)XMC@=s1N$ zy`fI2JjxqBe>8e~bPl9_@Kg;w0*vCxBRLY!T?r*9Y{?GOt+hLvD+YVfMb%5WM8-L@ z>P?jSgE`uQ;HnL9Uiov%x;?cQ3|0azw_Q*%>4?2zE3OedzgZXq)Y&cz<}eh)R0aaTwd?Q1a> zx^R)^bEYB==Rt-V65sCC=)3+PwF5Rzh}+pq6)y4c26UO-_Ti9n)5turA$Qqumk(#q z{#*;Z0SuCJch+nRlEjB<{POg1eH`V_gnm>Fz!N{Ke{qa2ITecCT2|pU@g3WGv1_5z zy|%?dRK~)qrv@E{Bb87Mzd66N_uC^eKR=~cbye9Nsr&4jRPYG-9mB=e$gsjsCviy@ z!0Ts^mYwp@I;F&~oOBA`)5?eTw5UmsY^*FsBTvsLJNMd@L+u#EWfAXgod+u9?AnkS zFo=o6xt%%O0HdLw#$J=4}r__q29*s4f*C> zzgkToD3A8j_9Mm@6N3{-Nw?{=uL(%upE%=BT;6tf`0x)(9oihQfe;IBr=gQQxL;-f z8$XTe1yK32n^6+z$>)FR-AKPPHj1{Js?xOyKh(zNg*1SIWYr_IahS{1oEK`R@T{3N z8Mh&68De!Kdc4Srs+%cBmCo}&*q0FV{-^-$3m5J36S$B{p8>IX^__1rd}E*6P9S(_ z%p^W}$#9EeS_fR}_Q&`+=uU#;I;I*=wth@q_?f(HkFX&tO5aoy7Rod@9u&*r$8*%e zOnCEFC8qq#5<5et;%tAAfndUKNlp1Xl^@XOYH+tdFbmdo!2J?mF=w4`>yhlm|eZp_|qdJu4` z7EA1`JZ3#=llgntLCR(tsvpF#0Y%Ypt4CDg$eb4TmVw^R2DY5pL-azD3lP$$Yqw>8 zC*stMj;8{pw~TiyOs-cT)?W_GFb?Phw0t+ClVp&r`~j^X5Y)DXEP0q zhvu&8RTBTDqTU;^YxpX|1jDEwE%QfTzc9;%4INM0%`pgJzelsLGRtAzBEV&wa+R9r 
zKr*)e+gw0VZ_rN;8*(MqlZU2Fad)B1BIikbz9h8I7g=haSnK@~YyZ=^|5tjRPwP4H zt}k7}xiJm-T9R^zNm5U;s3fRY=nW0FD+XSBJEI&W5wt{2;hwd`8AkNWkosQS`?dn)y~1ykl;|=(Ouo%KJfGzsr!_>c+KDn%Mb1vbn5A zt&usfJX+HDU`jXqA*W^OEQO)U*c#cSezVc!S`zP;sc7s0^`e`FQLZSndjl3?tlEMDy<*qA-XFV z7@jsr8FlGx$AfbW7upskxm3?>(U$2Xxg-$&luYR!oro_JR&ig^TcklMsMc7zJtK&R zg@f)(>&cgx>2=Wfwre>8iKnK? zzl8Y|R~y6k+vwA?iO{HPZkcO_0{N@><6JiPO>9O`@^o8s+C~9xK#3_`Ru}jyXt!N+ zMFQA)>2--3BbJ;%MPAlS;C(%)`j;#&Rq|W?m!~pfDoE)YoBo($mQrr3{eX6)>#!gT z1i7zJY$YoRyb?@<15HS&X19yx*y^qAE7!;7VUQZ~9b&~zko`Qav+MG*6U!wM$et}R ziXk>5{rZOF#|LyrGRc_a##Y%h_f<~L#hNb;3h+0f!Z4UaV%TyP&dp>yVJmcj7YdwJ zdVy^<8wTwXJdE~8Sv_8Xtfdz1Ey+ChOxbR^v&_CJ4<~o)^IHU*M zwc;HwL;H7ATcY-Aj}!6%N%8Dz+6(WWzpTRGoyr6WMYwUD*@{Al1*+&VFOjPBv7pQU zdX&s;L*|S6m{QnspTw#FG+$@^jEFX%`eD;^7?hJL1ktH89~k|g^3Mo0Ajhl?+!z^= z6@MX$eCVmg&R9%m8ib>aSn?ztT`l1xw3Hm63qG>d{?hkm=-NeX8EP_r;yP6w?Y;v< z%YNMTu@8lqH!2Iu)%=9d>kG{2gJfg?PeIxx#b{StcOffh{j8w^R{i72a`Gx$*#))} zjf4oQD%?6uX$Ye`l}=^~gKjy6(*m`7yt9(TqQ=-j9yUKV@J~9oW@FHP^0FOk5_jo)y!tW!0+7 z_G>s!>tLs`m-CbHm+A6K;nm%l@UMiw$&p6~h{BhQhzv{gDWPCaXh2hhZZ2maM8-v! z?M;Uk9ui1PDc^FHq?D!@%b+x^u{xL1x=-@(2Z6CjGe3uMftTz_W^NNW02)4-(kleH zJf%p&2UzoA{oIHby&XC?5PcC--Lo6PHFn%Y3du5wB_qveI$$g z)-yW|r?A(y+C*^m9f7xkc))<$R%!u4hMl&=K3&u0{tZ0@k<31l{wg+OIRQr3z5Pm* z#WsX%gg~HMR-?33RsEi5q17-O3|wqfl7y$~2DtZ^UEr4L!#VSpkAl(AD4Kb(@R78^ zZ4+f_It7(#ky9xL#B3SEtsL(}u-Iphm=}(3pD~kt4wAYsRxEaL`O@f3LG0G;_;eVv zG-)ctHJXYtQfEgoM5>UVY4Ecqmk~)x>2OeXiXj*tNs=b{Gf=ad>abGb5~_eX)Ud|1 z-4~op(D9(NQSsiWO<7jIX41AH|B%RMx@_@Hvg}rz{7K6Yszw7)g?A;!^bA$ME-R@( z>pB`EonP2Kxy^zDHDCu67hkABihQvA?~}h3sWX-ir-0!Wtewjd+>)15*o)Qsg|k_v zR|zF)15}smqLv=Ol8^#RS&`Z!(Fp;|MJ}$-2y3Rk5q)>|;&G zoq5%DNF`OPE=`}qB@`Y_^JoG+K^7U%iI((ZVsv%OBaxT5sh71_%(X0}V)EL%K)>j( zK_=8O-neZxP>a*lQ=g{Dr7D4eBfhL@LYtlqxDgu#{oB%+E>j9J24`GJH&rg&|1CI# zcIZH*)}Ge)Y9%lu_8Q=_*80?~R!FU(8;3*3g{zN2^_(>xe)#;e;|=Fw=kqL)eM!{k zw)U4o;6{%o5fDThjfU@*pam)Y7r^Xd8M*i}0i;sTdnN|}wbxm(Q+O!MmPuz*#rT5) zhCj+8NRf#a*9tRwQFZjW8&o9{x)o=?!*Zxf!z~)Dy|k^U8c9Z-H3;Ct9{A2IY&2(M zUP|^pxm%G1aXosscW*6B7)$_xTBLog5uHVn)+z$;IaLx^)yQVv1CBxz(%uigC}Kn{ z?M8rt<-#~==J261BP_o*{7%jt6V`l$DvsR7!(rQD*QATMn-5?at}gbgr`+$pY@5ex zG#9FyJ*lDc5I*MCN}h>@1azACadK`fwEzYL($LcNwEM}9*$B|I=VwsOd*jQ>1CiA6qaPgp>YTa${zeS zwEB(MAlOWL0d?(jsd&?3<0t7@lqv5C)FsaQSeJ3qfVL%@3Nx2-#braLMn%S4yN_q> zamNM41pHn0Syhz92c%~Vl<2?8^H@)vsGW?A@t&pYp+o@%%&w%QQHhwPT@nzC6Z~I6 za4Tt~5g^Sa6KqP~a1ZO3df%sOY^bcYS5aO3try_qV;p~PQRV~Ul|jCEE_nR0l#~rD z`Qg{`Qg)?;_~_gF{Ux$#NkUxw+(8)|nOObMY#nFWC@g{wldG}Qr}5&F+Cc1?3!24U z%#pAVZ`osGSr^u#%g99P#a)+f)lWMxgjSKFJ%SE;X(8N7*u(UtX{hUHZ& z6EA^ig!u2qJw#TEzW~sXc%>EwXhP^rhJ7~@D@LNdn(X$D*Hq2#w;9-GD437!&j4*FFAJ2IP-j19T6UU0cM~>CYYK_+nVJ=n*g1+r%E>b zvO8yTm_UM!N3FgI8>ByHd)M1m@aLKj7iPG)x8!of^5l{KNrb`bTI~H& zdW-5{&$P&g)BP*_DR+u1@F#s21w+PEA(gamSMD zH?Wb2D{+(RKVX#o55Wf_<-5Z1t91Myi+2*X&4FKr>PezwWHxJf*jhKC7)a706!1S} z*xhr;lddj*@l|u9`g~iPpR?v(s+OLi7nEoB;YBV0jIr(Y?+v1W*FN=AB=k%T96+q? z{2aHFaPj_0H@y!PK#T9T>GtP*DGMQI!C%Aiev0nymbyw5_TaP|4{Py}4$e~I9b)7n zkY-^XkK2%ts=dBpPMytQ8n^=1BA)w+!}8u2hmgD%n1VPzJ;)}? 
zr4^>LY`&yz0(K2CKpSU*yG8!Qa&mA+5d{$dg2m!qNC3(~o@VV$Ump6$Xx_r((razV z_jU;w49ReI?9QDcK}!`{S?nTSmM8Ib(~uo)ClK>kf98eHGw{$|8?hA&p6Xol@sJT0f4+$2%Ce4ogCGq~VFN}qKoCb1m-w!rX8AP(}v zsoi}Al#SCSWm1_Ui11nt|IIiBXDshSs4`hFq;BHka@vq)q}xKOYQY3Y_%rl6-fmCez6p0^UN%fYJBfl zcggZ@klFwht645H58gAeu$LHo%>ure_K4UAAn&m0}h3a`Fj?@!PwG$_M5hpzqmPiH){}Z;6QKYtQpWpojB%&=wwLKy03T$w2B z-^eKEw2F6$??b-vLefeoqGI*&xL87^ZB)=`DApo0H^N_Ey1T4K1Iu)UjbKiSjVq+7 zladSx<$5X;DWy1{UQFQbZ^TWj}7TOV@70ZH9@r707 zhvhxPRjbnnZ(fdFI=#-+4!NPT>9nm{sNliN7x8-fk%EN-u_|Yp9e==x$|6s>Wz%vq zt~nZwUo;Tm@o5J7#T?*{#_^H$d_q+5d$aFpSu=F%{d@j0)#7kJj)MFW;{txf^71xM zt+$N#mEF8{a8UkdvmkL@(AMa(erRfG<>HiSLf~s=-0>g z0N>N+AgB~8D#zBwBp~ZPO5;_*`}youkR*}K2&H$fX(7cVFU$%{aP*6lsg3|*+DA7* zGCEgz?dqiH91glNxaz@zjT-&cZ)D(DJ0&8oeu;~#?_+bXrv@o2c;srP*J~h4B(mZJ zb5M(BBMExyC4~Hb6j2Wkl{u?ST;(;DEP;XvnuGHKy+DeoOYA&!r7s2S9FS`jvodt< znO_bPzb&G~Tk+C!aC#ZBEylgkD3IzfsU@QGp{!{lAR@v;<)7PP#`4YMXxpg@XM-nn zIpiVoWf3_NnP|_Sk=RHbDAI}1F@Vm!w%tUCcMw?OH3lgBZxQog77HeJDa9pN=IDh5 zpuQ}QQEb4`i)icKW`}rDD#QUjSkj|SX$c>s=)rTr`J5uXst$!Mw@;cxlXSn3&3?Fb zKrL;|J6_U`OrEoAZ6mM5J!5DGW8WkbF5N|)MKXf{MBhOSJte@PfCjlp33%PB zs|e3bLu`$6pyl8%`3&{|{Cps}3|qo<;<mLmDc|Ih^kx`}Exb6oJ>zhPOmNI*!o{?S0ihhtsuSla!jh8K47Pvoi=K9uG7 zv=qvJp=#%K2oL)xr<#Yq2du1FfZy#r>Kgz5e+J=y8z01#=V&aRC#6hUl4LaXeslJ#v4tR73U zvM-D_SyE093voD;LuVe!W&r>4Qnh)Zn{Cx^fH*PFC6>19>3nwwrQ}A1KTenWj=KWX zUn5pEv3p*-f4&>ETr3%e-o5%|^%|(yzu_y9&^Wvk7ch4EBg3&B-TxHw2U|W!_R3{D zMqRI5cJ=CajfUQJ`oA@p%p$~j?aq__eD@|dgW*h%10;)9*?(X}z-50z6Q-*CDi~7% zoVWAw6e5-{#QP2=qqPhT@f85JVtk2W9CtYFvTc2;S(-qAgtHBOR3n;62M&DR3><(G zshTQ%NPDX|M#>o*6S7MK`Pb1wKi-tt*_J{CQFG`HHTr3@fdos<*nJ>e6^L=FB($_` zlbF_DmM*GJW@CZeQnV@VHFzCp%uJ;B=Copaysk}rL!tYN@=jmnV6sBdngtmv+E5O- zNd0}u@j=Ae-1ZU}h(>&eMfdo0wf%IU%Q^PzGw6*^aNmXph%(a;&Wk%zc(?r8c3 z*X$03G06Hw$5K5q{yh=cp0dVpm*x@`!g?8nowDS6hk1>29bi?HCxY)u-P}A|yjCaD zF-iW53Ax~C*rQbZlUunNeVTLCEI*50CVeHCAuQF;y4~+tgFb6Gyt*A3!qi~h-mmU} zPcn_+jwxO09V8#;WKhOU_IrVaT0qg(olLBV+IDLF))<=Lcf;L2Vdz~&o{YGQJ7QF@ zA`g`Kx8g@?^o#jDsE}=RVT4DP0qX~j##aKQRlW)~fxzfrZc)nLMGaQevSNc`F|zUu zFEhchroFyaJ<&3?rNAcxuL0RDTPmS|obm}*z%%oWi>EE)h|9{zQC?3rvuI7!c4W+L ze#{mbueQb!&CYe9T<8B~IX&E7oHjh$2+#Fc`nO_-!ceWq>aN9Su&5H0$5%NC#L$4H>$Esww}}j|g$&Rt!zuA2=IJ8ZR!-$1mtYaT?1=v;>U3#rcrjUe1tEP0jwM-Y6<8(Z&>U>< zccvPxJFs|M>Ik@=_^P9KwZk12bCDPuh0U4^KwhC2rzdVOIE}GH@2OS2&kJwwDQdlC;Ed{;fs17B+1fPm zB<4^}erny~ZeaZt*P_+-fV{gF%gv{NL#GoH-eV z#{ecq$G|;UckknHxl9(%PaIQf+pnsM)DE%5#xA;C`Y_y)cvn;fJy^t%V?4rii?8N}T*h|_+8ph%Wbo`V`5(H0cZamn=$nP=8&o?@Oe+@|qws;zi)K!d= zO?#Q3In2W`bBJ1^k9_?dcQktkWec{4dkUaA<+ZHX^jh* z>pEU1XS(>6!WS)_*t)0T4Nn^6X46@SWP)1(WD8Cpq&VE2zjE=&S#`i6F?xo!EC0v|TQBATPv(m2fvH5yU^bGKO59IVs1=8X(pK8yurXdqGyL2hU#vD;5UtLUW{i zI)Z8e3_etE`xpZ^lmwI`gRxdrL)U5|d^^)yqf8ZavR#u)ZMxa8oT|2mosp$X#FTS==^?W_vMy>x;|aiVf+3BdxaowCzgvl`YB@y3_}GTpE*) z=Q6z2NZ~Xtfh|&AVjiL0miNGYbUHp7bFqi*%GVHweQ-4+gG6Xx0G_Y|>xx)X+q&)$ zGGyj@^q8-3Bl%7&^4OZwYA|QZt}tFXkmyCMAXkRYawA0KKfrC`$RLMl#ou<4<42WC z;ASAfRyo;cPb?!NQ!y{llV9^Pe)sRuaJjm!XBnJI3F=xPP zt^_uM46A+OwB0lgA6l#^P?Fg-i#_hMXJU(f_VgRjG=4FeaJ}SE2#Q27WtR+oR%Ye0 zqnC)P(Uci0arB~S%JG@ss!FVNT&mE20xyCUXPKA42>BECw_+I9E=4hvg~OiykylT| zc+<$RS~tqm9ao)l)@6E1CRJ>uEN9B>qDv3~ z0~<#ubCK1NoS-*r>SqNs){!M@QXW32(*ooH=Zo3*AsZK`bOyFvyW5?Y#Wj!)8W~7c z>aaF*JKQQ*_WuSqJ0h~l{?(9wB#&)>*Y;{JgnORMW=1ZlMq4RA+jeM^@h+c*^^*ss z(`^77+QbU_$I*l2?#P>((7O=0{;!mLLPnfXW2}aR=rI6EK(@b72=4UkB$TImMiOIk zAD3pDaS+ns61c8E`{2^UoM5;<(g4{562*q`Czij#?*#bUMB=ktd5C}vZ1OV*gn|~@ zE+vPVMu_-?{CGyJUE|=|Mt5vxd?1ZY^*&`)i}& z&TBs2$_+aM&zwWrLD(6Vjb=~i1zGG7XD!f4kL1&I>k#Rr({0DVtG04L$QS1>zJaf@ z==DQes-*F0R+n523vWhC=chFo3K@3d-Po^4^&m%7t!UL&PuX<`ManMjMQSsfH^Qx| 
z)_aQ+6PpGAm5t3+i&=n=?$sF&~3 z%XcOA7js^W$D9Lxk#jA~|#+L$v z=z;-#t<^RjkdNZsJXXe+D4rpA$$&U&P0P4>a$NPjrl9;X>@MoCeG5FWRa^NWSsU|j%19o@_ym^F zMvV>g7fB_Qprd1sx;zI}P1%T7VT~eVxuWehcuqBJdAT#My!>k&rS`=sC@YqkTS#=D zRT3St>VwR&dUxoR8O)v+4)IUIDgHE?2m2hnE$m|wR8H}ja?LrhnYAGcII&%tNMCt3 zy60yL=?SNF#ElmjhXjYYGM{=l=Nlt2fhNCPOo%k&+u4RDBmvj-E^%2gJpAu!6mB!M)cq;-;9bVwd8+|_h8lzfb4{Y`TWGV?fB51yx+$E1(u zYzYDS_iJ$MQ&5lnyg+?diOIeF*}8JPD;*lrqn<#bz-^!0AhDRqAKoeh8)-LOcuT^q z3ke1oLHJkPMnsnMsL)xP7UxT>TTL&=DfuAPpq zcxgDkg^-dNA5n2y40u@EiAIGRy=0g6X4yI}POLB0 zl%UDvXS*k<3G|k*v?mxSZS{0dA)uH1CXtUEVAt8n*w-=5oO!M5Li=N`p9Y`Bm5F71 zTE(hrTy3Tbs8sW$BeXZPi7B0}(2mj*ev4Gi*N{f#mPUmZiVOO7ZhuO7ShYmYD0z}n z-@&-QQmJA7sB9DDf|o$row$2aEfLg!EA;IAc3W}LJxNhU7~vcb7MMk20n9a@H!s%M9HQeyzm2 zW7Mc6avTI36$zj@#{NM)$!+QC+>9!_x^|%rYxpLk)vMqLx zOwr#wA*!fWD+G4%S*gJi?&WQs84=x13nuel6IKOTX|ae-d$L;anF)xINbzeVWkN+{ z#Dt&N-S-AHn0&Oe&072W(#19ERvc$84ZvD-?#=k%pa`6af9MJmq|NM>y(ObT6by2b z)A@}Jc4z&+L9YOhfkDrVy}w;MjUR!6n!M#U?hWFlE9RWI82?lJJ^Qj0)Eu&55mXJaM8dvg*$NNSHZ~ltc3ZQ*cNBoq${WzdpODkX=kP)OQ5-1U3&Ba6 z&{0}p%3-_K_hq36H*{{c{5&ZQPU)@d@!{*8FreR)*~#}gy_AU{CW3FIG5ebwTFI7bgR@EcR?aSDL)2)wh&%*fEB%T-T5|YQ5Y)u82Q#Yvd~AZDUwk zXnv#9u@Z{+C$rD9&Kn-2gTS%?oR=~jphcLqof#_~R__Z>q&s1pwg$J<8u6*#6hj&3_?41%A$X69ekEXoQ=}sCbz}_L-{kHmWbc4C_%44!>CvO$1WbRC>Hqxf;n@=$f0vNd$nv zIqP36wGwgj`JRUtsnN3^EFW_H)FGA7FLY(y?rirTJ%0R=QKWnFOxczNt-0?an~UyoMB*D+|eXgE7AXF z6j0eJa5@DQ_GEy>;pU`gOE1r{a@11I?-E?Z|41Hmk8?)6azi0@Hc=09f$ddSmvzxy zT6W!wfa)vGg(j8i=wxs@Y#C^wJo{<-LD=jH$J=l*>6XqN6q8Sj5D~)1efMG{vO5wy z;TW!lSo+IRVM%w~L`OA)fV!@cP*573jtnZOs|u_fmOI&di98;}Ov62j=`hs;5$8Q8 zTcNNM*JaZaDF0(5n%8s`H3^h#Y3s+S=bf_MCRgn`ou?@{1a}5`X!AA4VH^1n6??VH z9ibgwtJ32`Tavq`LjP7;*rkUyF^E6apj0T5$~y5J@U;iK6D=Q$n-zFuWfIUj0Wli* zfx8h6?44Oa0eABpHN%y+|DVd%tE69m!GNO^9t8lbx?O;m&nQvmdU8Bc+WEdv(~*ZX4z)AY*-rRBBIdjLu;LmG$y~|Ey5MX0G0wL z>G}j@ka@2HRy#a$Hy4J3$TT^Mt8VpLMeqbv6L`$E_wG0jn=|wbQSjcby4D}c zRbgWTUyVn1Jp4$10_V?qNib8(9u*T=wu7Pj7k@p-zJ%c>;K5V&rnxhHJVy>QOInJx zgVzm463W5-N=#!!m1VTpKgwTD70mRM_bkd&I!c#rP_eO8T;MGi54|eYPt!XnIb9Qe zUbYo_KcquYCvYSPPI*ijpe*6=ED8&I&jp(%3|PQsFnpTH(xhu^1*)8FUrk<);2;O~ z6ae`f(I~p#++V2+2>{ooH}D?h5KT84~m z(whgmtS*%PSEk(15)Gmr-f#|+>+C980SrIG>XqZU_H4Kpr;<}c^bsQyb7jiVZ(hvu zUJ)iwU}6TaB2S+n-izwm{6VW|Poz?7BJV6@}TwFLZz)%{%AeM;q8OU01)J z?x*ScAH9_`=iSBT{$3q5Atv__{$M_}-{0=`TIeXSt{JiQZ;BvsxaSqm+?0ZeQ;$yf znCSisk?37rRq`p}t&enm#3SE-*i9uO)TEYbGXPfg*Cuozz?H)4SZOnWI+S(~1`;g@ zB0qo1g=2sQu}ZA)*^4926Hufl7VsWZ7vbcNdEg7T189HcRt833axBH3O^$r4;J7c= zC@@a_Cw|vjeUE=G*lJmz#MAZ(k$1SP|GRr{E~|ek zkWAT8wFedD!mtGsWZZIbP|!HZX#k0V5iy&?`|gd!L{SpqUG_Bz zuU7iTaIXpYdFSsHi?wzovWQrYk&|yZsGPBJ9D>jW8T929;NZ^k zVOiWrRHh~kVlgYRt2T}u*tBIXC1lnKt6@wP5G{bc0;U7;hN|1=03(Wrby~_QxS3K5(TC2$2dVc$5(g$f7SexKui*Jp$T)PXT^CYJ@uiFXTRkccmm@i!#^@^$w?EAf1_3}S$~N(M;HAx zq97g#Zi4PGHgmpc+%t(NDI46}Kkg|N+8w!{5r0Ezi;+>AO zSERj%4x2d26V4l*-02f<#^2QafRT+ZcTc)4hKbWYOM+88wF=+heurdC1Crkvr z*?RYqXpMP6t4hN)7dBuTC549k$IS?d{x?Q1N?I=ep<>>-xAb^#>dydMr2t+8#nD@Z z0`n}htGog=PRb{~V!6bwor5&LL74x*1sEY#e}A{bu)fM=^G%)9Pax1I$fF?E5 zYxG; zb9p}V5Bz@F45QxVEAQ+~;(=?`ZOY!bOD|63buUVq;)4$#QtW*=Tlt?G#2^8GLg(Bp z$H#$&AZMm0)wEo$#Qs=$oKP<|(t%#2uR;65@H3U(t+Gq*I^W&*Wm!1r6Li_*`}@h4 z#7cmImMmU24weuW-}00;;kacElfhJL$wm8l_1kmCS?{;d3d@YWF9+hl zK)ldt&ZfVrME`E>@gxBTR(aq<19aU=tw{Gt9$PL{g8LswMBJQypZ1X%{YwAX%uJUl zU!XtOF2_;29i_Yv)wG!oN|=ZU0H0x$@I-b@8E*%;SxF*z=abp}*jz#r@5iKJAf6Ou zc7FKvRl5Sz5*!$Gtb+s==PTZPzW`rtN~xSc=;#Xgdg9QShrVolHjeF@ zBwxWoVMw>FuRpq~@ZhdzlD$MXPrT4*V@U31|JBP)fO+~X>``ndGvhIPYv$6IfMk|A zc9wBr=GsDbF^3qb3)AcXk8(NrbJpYzm+u%l_WEwdEzckUu}S3wmlXF=02doPS}End 
zzatOlY(6Q_M*RA6ZPM1-MmkDW=iys#o-A0OpGR%xxU`2+T1<{a1fQ(ZRXToHAgY}N7xwE1;aGv2itdiw#sKG| zrT=OBR#B37W z3PFtJp>OCIWyxY~saefuq``z`{_(%q+|Dp$se>9|+bZE5u7+^!gbs$wJ|CF90>)FU zG{fopVpY6_+(_$(6fpYKb^Hzp6F*&%a%DEZv?Gf^(V?R}b?K()Pk5}9Fxd6i1-2CL z!3=mGocOvB(n)gyP+IfpL}CY3*ujUBRJVil{WMemT26;5@Rwwu*b$!`^lf6$!LuH> zcR@a~J?;x9zuMmyCye|>R7?&E^U_{3G*oIn*mL>nflVdN^J z{#S2qi787kT{w;r)HdICO6MWal-D4~z-{r=swmtm+*R)RPYa+8bMEPttqV&mQOX=nRETP_ z0b$%C&(cRx*p#{-Btej5aR5Ra&YNIn8Xqd9Gy3l;>Z!G}pn!mQaxjYaQel%KQ&2n5 z_?)6F!qF`e^?*%-H{LqLa7m(o|0}({l9~pzBLS$Mu&a=Y24^^9>}@^^oNg194{Y*H z=}De;AL%M|i8$$kjk{QO-$bNkRq}@yDe#hC)k&=`DG2F$3B`~UN&imv4rK#zS7X?` zv$Y0N4#&f&)70s{Ql)7gTo^;}r?i1GECoxLb|rp|#lw2b{s1y>DtezqY`prt9t6!X z<|{n83XHQnX^ol-m3pfYJ^?)!wqju8>P+@9#cw^@qIYHs5<_4r5C!NB4}qO_RJuY%I&K~=xEz&C25Eckjp$tULCuiKcQ-R_;Gc2XvsE-7gpZP%l1 zrt9=Z?y6PMV1`;)Ib*dTQnW-OCMlm*%ML}vZLS@TS>fQR8-M3q?P)x)QxyN7Dr^oX z>Rwh=<4rWdcyXU&U9hvg2|n-?KsND++)8vG3;+2c($Bsjc8@JsI4U1qW+zEZhIXu` zu|+F3%HG;Obe>!*g*lGkleL#^8e1lZCSmimaJ$j_G+)N3eK3*&1kcfp^^HytGd1YH z^PTpD0)i1OJwMGDp*aWd4g@cOeNB)qzu)H>-t-F`eRz-%7r3VH1e+&2;y+{@x3awG z2iUwK?0=}}J!TYaEmaxw1gBpY80!wny8C}-#no=X50(+EAm|?~aU&ORgPB`ef=t;0 zoBq&YX0hQ9C&i+dDATl5&+u1UAfje8y+6B9BX~5~P;zJ(l{l*5`M(IZDGVPd|0B~M zpT%6v`sV;a`D{HAocy5oR-W3127EY~=Yh`|GieB=Wsze_nLwwPAT@V3jDLw6QWDpF zaB=%Ao%)FZQ0q+|j;8H#=UI%}MOt&*)%Y7|Od?py8fS0DO^Z659?)^~kjfZMQ(F{1 zf6(>iP$Sdr>l-aWIitCxdPgG;(8`d)U_ZgQR%g&!rfK_U%27q6Wxc@Q;Jjd4`#?Gm z2OVPau~vIu55W9tbyK7!p`D);+C@2VBZ<2TK8it;Ad)E-I` z`h}j*0}f)lVJo6#_$Qo4^v(zJpqaDy|DcQ5z6QEPro3mdeQ{hVCh2AaKp=*rWi)Qx=B^L zOO;wLX*o=M!D1)Zof-k6}$@xfwcK ze!->AQLi+NFOuW?^fxkbV7r=#jN>_|K?IH04-u^{Qb2{uQfJ>h6abSoq>|ZV7TBjG zBm>hkKWN02#Zn(Z&zU(6*Ai~m``@)B_C(>hT|+_wPdoCLn%2)U^RHhSp@Zwr7PkO< zhr`FGSuQRAXMAvb+qPn$hg2WeZPnO#ZQ(VSJ4ga#u6FF{cJ(kblhs-C+aab4R9E)x zbow+eiqKH-N}?6kg;t|cg+I*ojgVZ^6U8e4>Jto+7Yxx=1LB%qMu&~jl#>i$Px*So zMG-I9trh&Z`#OFZK@H6o{Vn{4_+LUfiCQxc_JN;NZ}&YmRW zKF%-V2oh)mm3@$7tq*jOv05~>1oC%HWU83=zRJgb__T6dg2$}em`=diS3#4-Tfyx_ zWy2cjmlT-Pr?zd@8J2G_{}(s`jm64_s$r$ROF5iJewfs%&aR{bDV)kWkX~pSir)f#Uq?tijYW&>2-9qowG#^NAGL?x)9NuAeQg;dsxbOngW?A+y8td}6U1hS(fW zMhXhSsJs_oe{n~Xk%-fWGO_yg827@$GjsU}Dwz>XC&$GYJo9QcpU}u9u;bDlYcoWY z_C~b?c6V7}r-orI^wC=W z+}!B`R!S^2j-_cm$bMw9&do50HpA?tt$L(TT#Cp%6NVvC*Vc((gqcJ z!laWKK@15jM*X?gd1e%qcT`^zD5CO=XE5I5Y?vbdO1-llyzkw3ylvNV)55}ziy%{m zq@RjjM|-OQojxr2-(Q>4DA9?U7&|q5RT{Wv6Q+EH&me08j|?OF8{RcqkncJEIol&8 zq8R4}YCdE3dFSO@3Y-CbfdpUYB&YN0u~h8>CID1u1z)S{D;Vi!Ho?cTj$-AS-s9Fq zS?#oCNZ}_W*n-{WXL@0vA)u73Kk5KjKiyB6AjCX-16jx8ouLwOJ zM(az?pGk1s4E?4yZkW2ar1HYK-{?8Ru6A+b$8!f+UHVRD4NVJ8k)RwJHaU|l_{(7r z?IT2pgw?MXYX@k{)s_~DxwPx9kf;80S||@9e?CWSSywSgRtqZ6d3Z;4P?tK(%0>CU ztcsA%`Av;YRT4_&Zhk#{Z=T?FxOK0}YMQgCsAr3q2^)Qr>Ia#uPF41DN|sG|f-JqK zilHT8K=9_7aoL{5KNP*V;+MZ|zHP;iFoJCr`ns!ZuwwyQ^ZnWfJ6T@8IVp~n@6~xc zNS~2g>=c?G+f00uLmE(hT(r$U^|dU}){2jSh7e6C$kP)%SKez}-i6;QvwY&IoJ=y1 z0lzO3rA?h_Z`zFQSQH)Zw%*LIMj$;Y=dUh5?rhHF2}E;oOZoyTXiaUtaL zQpFC}^eWVZvOc7N=HqE!@#+JY(}5j_MO43xKq=9OzXtU;N$?{+pLifBQSAV~FM+)U-Rm-%!;P2I zvuRp}V(`=G!${5ExDj?C1P5rkKDZTQ-ZF&sc+U33vSJFL8U{(g*#)VhTO#xy~q(Y+|`^xu8P2DSsECq1*F|l{g zZ7Fjx^WO+2X7G4TiD0J$&{{FZ$D)&>mWX)5l2r4fUGyL2*SgME8_UZJC zl{Bf+^4<$1f;c!B-t9vI2hB_9I%+pvoXo}XAGD^4x2Am3Iv#fmT%MudN)hH;ThitpQ^x}6%GgvF_G*()@BT*x4?&SjjBr+nCy5d_6W=MqLD9usy&Pf zAFc~fQtzEM|3mLx+Isl&7LIBDS9=XSG@=M!dKlF9Z$8N~jkvhRbNr}|4NEVX3A&|r z=BQ=Wo(+8EeuvE|8!Z@w{nqJez_vxI3c0T{($Y&!{9l`8qf0&e3M2r3L|QG%n7?3x04kJ^4wRXv|EYnwWZ1o&(((Ez)jbNH2WhR4=h0<3uCXhe8= zYY`y`2aozhmIZO7xLiaBT#zqSpvsV-uRmP15^OIt z#wL-M8#2E}g}ixY@tbd>s}D+zsoue6mKQiaP}NNlZ_3IVOJ0{X)|+}lvSb55Psj%A zr$qKqL6-L-ZC+f6yqng8NZ)V7Y)fA^LmG@4+NO&+;XULX=vAG`x;JqObKe*c;PAC8 
z)Bcxw*sBexCL+Rkfsckn$JX#%qk(%+_k{+ECThBLjxhHV8bI5R=iR*C=sQW)IraTN zWv<$_SZh(dOuy&4_KE;ffw3gMK4E*5VKl!;t_%qvB8sY_YHQb zYt@X96_^hKx!76b&YAg5f!0@96V#H?q#*Vmsz50eX92UsFo`~r=OAT}o(px}8^?^Q`#>F4z!WR40W)d$qhVLF1+e4xv!=|g6FT<^?1awoG-_X|a?(u4;Ns!f2vBCjE+8uf^ zAgsuH!0zC^6%tG*uQ^y>r&jD{7amH_SKXe za1An{2AH}l-QTK%B;>`o^3?GVZdL|u6;0 z_z0!6xOhz@6uBDx1y;-lU9N{^3EBdfDr1S3QP8R^V)X;8Mw+%=H^;c1Sbs0(_={VH zNh~+yFg;XIicI7liiSq9A5NSz3!H97#eCxbTBeHZTj03hXoQ+N`;MnN7)opbm`+%6 z3~5{1=}GKn%3UtJEhLtjK`~H3Qyrvm{;nVIZB&d2gndS~j2Fod>o@0SMSjUeOMN9A0iC9o48ucFa z?a&l{381-zzQ@-G59Gim5f=E5eywn?ydlf<6m?RBSMRXwO~Z6N@zI96?82N2?d9{V zChceA77});i@yx2ZeGj~%tnNJVi#&=!7${G_xGXzXzL_C{x3`2* zPj>j(P1PO$6Wif&F=LxsECYtc$W|%W)5SX>BG`*J?t!rLt;#S1S_6&aH47a8Sp|LM zzI+Hd{J%o)f!v|$ItUGW;1sB__KrGU3KLB8lX#!m^hW(t&;Nu|+))G3rlp{_4wyLk zu3A=pek%g;Q45pt+(>`2WSLyF#2!^tcOa4MO$c}EHY(8TIEGx}u%S8Wo(A4H0l6Rx z%-$HS3;t_Lw_j>eeq)U~wM#5!wJ3S-7Ae5YUNWk$JOaber_!uC13l;aA`nunQNYR-@W{h>GS1#E}>e$M#> z1bYo+!en^^avNa)N%K>oH%=vQQ~{6ihv?zI%ldppI{+}S&J(V3a;aPYvC!~3I!FH) zXmp!ORjZK;;+yh*Hm~FSQI-up$!x#}i~*tva5jham94b-q+oRwLYLiog;DCAY7=nO zy$^XT8eGi)<&i=;dJHf5b1`3~!p`!>CrF_HY7sE$lCwaWuZGMIYYr=YZbu?%9(127 z<%1S|+ET(r%62@pq#0={I z4H6edgZBqDK3$_Jv}ap&lPr^U$rboPNAu;CEjF~=G&$vQhh-a4
zF%!v^S%OLSQArdoznStxgovK`HjU8%uPX$4yAmp$Vk{fI+OdN6B(5wDZ2Q5K8n_E!MK#OAOVFy~%xgi>k zZ*ZeAGE#d&C3?DT^#%++{7ngazwB@KuY0^pkU~1dT~dsErNkK7|F;h`|H`;!9H$)d zNhp6L!*!kYr4ZJWZ!ufy%h21HcnjF0SKbGeeq^!*is&d$%T5BhC|Yv0)Cn~U(yJLOS^+v9(TY(WfV ztUqJS-dNCUrmo`3K*eU6OWIU^M*KDlUb>sriy8-w!nu^)OO#W;;+U|Ro&0n=E(c07{;-q|Y184hg@y$ISWVpK~x1on`|+YL}h&nC{X z*>26=crRk0FT%$`XW1PF;?ywhSM6sZIoq0<)$ch?D@_}M$kw2-2*^KQND&N@KwJ>i z512@n6c^eWvALId@-0yLBIm+;3rJCF+<=c}L1j-f1|c7!lc86&l;u?Dj9S7&D(%zE zu*3;wb!6VoE>+owm7OqP2j_w%9W7ewxM^p4J4VVFv&yp z7@qaL$N~ym|-!wr$hw*NMp6%`0&!9UTZJv>U z1JgNZ8+5Tm)1t4agb1W>)-N+)qP8PKxYe*Ju+}zCE554?PcK!|1UN?!sQB77$Fgf8FzvL!H@Hxps*$MfIr2CDR;(5Z81n~)m(K3 z=~qcC8sPXs$?*v7xm;mQW$WtgZK#DgER0;KQundq!5tw5G`3=rhH?cB=`@UN&1kYI zAL_D-GXb?2z4}V*Rim$om87#6cL_1XKUJygjgQkJMWSIbxIafek-b>h5h5+HGtp!X zm^vti9*nE?D=v0T@U$RP@m^A|1w&mZD0c`1LhHW6&i6lPtt#V76CY<%MHZHW^LBGlPK08$YI*jMozEKN^MQ8V>37 z53X>q8sT$S{`hg-@sLk6NHduuRc24{Xoo~Z6QRaP7NgQ~bMUYiv&wK7+)x(_WIton zZO$%XFw9ZTsCYf6{c{NZut%#8{n-f$?ST>S;+1ZZu>@I&CUj+vm3wlU)_4@VW7UcrC-h*^_Ak6kM zsh1AHOe8xT-cM}-?7DbAYsrV1mP(#zK>Fp9KTwM8@qfAtI~V~8#HY|NpQag`$Z~EkAc;h*HWEtPq1MRYz3O? zSW+S;x^7_DQo*Sut1!+}t*7EmZbFD6KcNk=gwS^GP#kF7Ezzusplct47Z~VEr4FNJx zM>7p;0EwT>$xIg%6=h|3o2K3G&ROl$cj8=#aejIH(hXA-2i>I&`@}J~DbE6@0<*|j zD1rtS5=s&hiF{!2#SX0zbg{YJDcUO+WPgZktmXooW+tC^Rtk#M;BK^%gkKfmreneD zJH=mOp6}^KdwAr)Frvd&97VRW#dmr_ z9kNs1JSc(54)Ii`$}C35cCZ!nnt^pmIsX$@iM|c`+ZaL~3OS-bvjbTaI76gzQQ#W~ zs~A9))ReR{g(;&))^;A!BK;C_GRbmk`P@oUa1g5UZ=58wzck@oJxh*AOt{ zjyqG@^$?E~P4RrEErk(|5nNQXh^y-c!}kb2){=1Dd$1sg;uPm2tGacC4+>jgkT z%*+Xn+{M?-x7mBOOD_tG?@90bwdA^+vw2t~c2og`lfR6(^6yS`K|4US$r++BiOJt#?4nTFkS~VS z|Iha7z>9SMGT8N1H{Gk52_Qt~-GsEwMQ@-%WpMw$F@Eir{Q?OI7J#dwvJZ#P7;@*tfF|0c!RgC^hh>_s2%4}WN*3f z1>^nQ9W2MAE2WwA787XwaUUDk+To~11}CY%7Ya-=zW^_r!03c;+#o?G>L^B0pm4St zy9?8EHiN_O#s(e~bMR1b{!+NKQkp;hy>Uso z?=H8YA#oFFRSg5;<+S{@_Q&w99krwQ?6%#9V;q-vNu* z+Y^vwonI#?h@_E?duWavK7Ous%Ni^lAofIek{z=)8m?=maS(Q-%+ioCzsw-QhTn+p zC{qiQOd3+knWp+KSm)X~r9C{+m8oiy#)7$nEf7Ek(2#`v}5VMWw!#&_g^-6=BRfCu4>XSZ<;*$i}^ z8xR3bHrwr4*6gxDa7-aYfvi+9vc!{Q+!*;mx$nV1HR!PwP450rlb?<7>do!|W!}Je zepxDhzeB&9ZmhHoMb`bbG?0g;>|v0#a-3I1c5*WM?+nr^r%fGny^*@+8882ic7~*5 z#vTV)%PC69y|jo9NzD9(P`tnobSD2 zMJxFLHo%KrIZDGH&6Od`&l9OR@R)UT7cC5Y)r3tep`vC3GXz3uEk^as<}c}$af=s0%rU> zU+F1$`+66aQs(myU?&R98pplA@FGT?9KO}ok^MZfR~%&ZtP4!=55~8>fWD5?V|b`3}!e5^D9WJ z9#^}zhuK$frwtxR8p$N0W(gw&fl{LJXolaBLkI=C2i+S$_5B9eJ9$%zaerUEi z+-LH>kkjFB9TOR|2E_&Nw(5QGgMhZ4IK3f!&P1zDYihlN1r{Vplg(|}S`6a735Ri} z()F>{1d67kBW#}g=mZVzgm)tFNLha^q;L&pDqNnR>T^t7@N`}2(J(uA5?rWS67>?E zuFy!cim0YOil4V$JKcwh$=U0dHL+wVs^(^C|Hsu1FFY4v1LjL&u6ulsO$6>7L^S_o z&RA#4mtVBd1&9)>+@E@Xw9Gz+&MBl$Zg>lxQr{eeY0%#@k>>G;a=k+{GQ;`6BeflV zgyOZd=eCWuFAm`NVt%!ar84$bxGdk@Dr=zzaoc$RRP^N{sMx_a)Um_IwQUBy?$Ib% z26B}U`4Q_Cw^5lyYxc*c1d397$(09Pg4gj`Lm~#7XiD?moX;b!4IvZ>17S*1!vKPr za1T#a!9c3&GeADeq0MLEax0oeqdppi)nR|>Oe@s|9=6xC@;<`R_%b1e(i-ZU>@7L~ n000001M +#endif +#include +#ifndef _WIN32 +#include +#endif // !_WIN32 +#include +#include +#include +#include +#include +#ifdef _WIN32 +#include +#endif // _WIN32 +#include + +#ifdef _WIN32 +#define vsnprintf _vsnprintf +#endif // _WIN32 + +/* Backwards compatibility with headers shipped with Visual Studio 2005 and + * earlier. */ +#ifdef _WIN32 +WINBASEAPI BOOL WINAPI IsDebuggerPresent(VOID); +#endif // _WIN32 + +// Size of guard bytes around dynamically allocated blocks. +#define MALLOC_GUARD_SIZE 16 +// Pattern used to initialize guard blocks. +#define MALLOC_GUARD_PATTERN 0xEF +// Pattern used to initialize memory allocated with test_malloc(). 
+#define MALLOC_ALLOC_PATTERN 0xBA
+#define MALLOC_FREE_PATTERN 0xCD
+// Alignment of allocated blocks. NOTE: This must be a power of 2.
+#define MALLOC_ALIGNMENT sizeof(size_t)
+
+// Printf formatting for source code locations.
+#define SOURCE_LOCATION_FORMAT "%s:%d"
+
+// Calculates the number of elements in an array.
+#define ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
+
+// Declare and initialize the pointer member of ValuePointer variable name
+// with ptr.
+#define declare_initialize_value_pointer_pointer(name, ptr) \
+  ValuePointer name; \
+  name.value = 0; \
+  name.pointer = (void*)(ptr)
+
+// Declare and initialize the value member of ValuePointer variable name
+// with val.
+#define declare_initialize_value_pointer_value(name, val) \
+  ValuePointer name; \
+  name.value = val
+
+// Cast a LargestIntegralType to pointer_type via a ValuePointer.
+#define cast_largest_integral_type_to_pointer( \
+  pointer_type, largest_integral_type) \
+  ((pointer_type)((ValuePointer*)&(largest_integral_type))->pointer)
+
+// Used to cast LargestIntegralType to void* and vice versa.
+typedef union ValuePointer {
+  LargestIntegralType value;
+  void *pointer;
+} ValuePointer;
+
+// Doubly linked list node.
+typedef struct ListNode {
+  const void *value;
+  int refcount;
+  struct ListNode *next;
+  struct ListNode *prev;
+} ListNode;
+
+// Debug information for malloc().
+typedef struct MallocBlockInfo {
+  void* block;              // Address of the block returned by malloc().
+  size_t allocated_size;    // Total size of the allocated block.
+  size_t size;              // Requested block size.
+  SourceLocation location;  // Where the block was allocated.
+  ListNode node;            // Node within list of all allocated blocks.
+} MallocBlockInfo;
+
+// State of each test.
+typedef struct TestState {
+  const ListNode *check_point; // Check point of the test if there's a
+                               // setup function.
+  void *state;                 // State associated with the test.
+} TestState;
+
+// Determines whether two values are the same.
+typedef int (*EqualityFunction)(const void *left, const void *right);
+
+// Value of a symbol and the place it was declared.
+typedef struct SymbolValue {
+  SourceLocation location;
+  LargestIntegralType value;
+} SymbolValue;
+
+/* Contains a list of values for a symbol.
+ * NOTE: Each structure referenced by symbol_values_list_head must have a
+ * SourceLocation as its first member.
+ */
+typedef struct SymbolMapValue {
+  const char *symbol_name;
+  ListNode symbol_values_list_head;
+} SymbolMapValue;
+
+// Used by list_free() to deallocate values referenced by list nodes.
+typedef void (*CleanupListValue)(const void *value, void *cleanup_value_data);
+
+// Structure used to check the range of integer types.
+typedef struct CheckIntegerRange {
+  CheckParameterEvent event;
+  LargestIntegralType minimum;
+  LargestIntegralType maximum;
+} CheckIntegerRange;
+
+// Structure used to check whether an integer value is in a set.
+typedef struct CheckIntegerSet {
+  CheckParameterEvent event;
+  const LargestIntegralType *set;
+  size_t size_of_set;
+} CheckIntegerSet;
+
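The ValuePointer union defined above exists because the framework funnels every mocked value through LargestIntegralType, and a plain cast between a 64-bit integer and a narrower pointer would be lossy or undefined; writing through one union member and reading the other keeps the pointer bits intact. A small self-contained sketch of the round-trip, assuming LargestIntegralType is unsigned long long (its default in cmockery.h):

#include <assert.h>
#include <stdio.h>

typedef unsigned long long LargestIntegralType;

typedef union ValuePointer {
    LargestIntegralType value;
    void *pointer;
} ValuePointer;

int main(void) {
    int x = 42;

    /* Pack a pointer into the integral type: zero the value first, since
       the pointer may occupy only the low bytes of the union. */
    ValuePointer in;
    in.value = 0;
    in.pointer = &x;
    LargestIntegralType wire = in.value;  /* what mock values travel as */

    /* Unpack on the other side, the same trick the
       cast_largest_integral_type_to_pointer() macro performs. */
    int *px = ((ValuePointer *)&wire)->pointer;
    assert(px == &x);
    printf("%d\n", *px);  /* prints 42 */
    return 0;
}

+/* Used to check whether a parameter matches the area of memory referenced by
+ * this structure.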
+ */
+typedef struct CheckMemoryData {
+    CheckParameterEvent event;
+    const void *memory;
+    size_t size;
+} CheckMemoryData;
+
+static ListNode* list_initialize(ListNode * const node);
+static ListNode* list_add(ListNode * const head, ListNode *new_node);
+static ListNode* list_add_value(ListNode * const head, const void *value,
+                                const int count);
+static ListNode* list_remove(
+    ListNode * const node, const CleanupListValue cleanup_value,
+    void * const cleanup_value_data);
+static void list_remove_free(
+    ListNode * const node, const CleanupListValue cleanup_value,
+    void * const cleanup_value_data);
+static int list_empty(const ListNode * const head);
+static int list_find(
+    ListNode * const head, const void *value,
+    const EqualityFunction equal_func, ListNode **output);
+static int list_first(ListNode * const head, ListNode **output);
+static ListNode* list_free(
+    ListNode * const head, const CleanupListValue cleanup_value,
+    void * const cleanup_value_data);
+
+static void add_symbol_value(
+    ListNode * const symbol_map_head, const char * const symbol_names[],
+    const size_t number_of_symbol_names, const void* value, const int count);
+static int get_symbol_value(
+    ListNode * const symbol_map_head, const char * const symbol_names[],
+    const size_t number_of_symbol_names, void **output);
+static void free_value(const void *value, void *cleanup_value_data);
+static void free_symbol_map_value(
+    const void *value, void *cleanup_value_data);
+static void remove_always_return_values(ListNode * const map_head,
+                                        const size_t number_of_symbol_names);
+static int check_for_leftover_values(
+    const ListNode * const map_head, const char * const error_message,
+    const size_t number_of_symbol_names);
+// This must be called at the beginning of a test to initialize some data
+// structures.
+static void initialize_testing(const char *test_name);
+// This must be called at the end of a test to free() allocated structures.
+static void teardown_testing(const char *test_name);
+static void fail_if_leftover_values(const char *test_name);
+
+
+// Keeps track of the calling context returned by setjmp() so that the fail()
+// method can jump out of a test.
+static jmp_buf global_run_test_env;
+static int global_running_test = 0;
+
+// Keeps track of the calling context returned by setjmp() so that
+// mock_assert() can optionally jump back to expect_assert_failure().
+jmp_buf global_expect_assert_env;
+const char *global_last_failed_assert = NULL;
+int global_expecting_assert = 0;
+
+// Keeps a map of the values that functions will have to return to provide
+// mocked interfaces.
+static ListNode global_function_result_map_head;
+// Location where the last returned mock value was declared.
+static SourceLocation global_last_mock_value_location;
+
+/* Keeps a map of the values that functions expect as parameters to their
+ * mocked interfaces. */
+static ListNode global_function_parameter_map_head;
+// Location where the last checked parameter value was declared.
+static SourceLocation global_last_parameter_location;
+
+// List of all currently allocated blocks.
+static ListNode global_allocated_blocks;
+
+#ifndef _WIN32
+// Signals caught by exception_handler().
+static const int exception_signals[] = {
+    SIGFPE,
+    SIGILL,
+    SIGSEGV,
+    SIGBUS,
+    SIGSYS,
+};
+
+// Default signal functions that should be restored after a test is complete.
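+// With these handlers installed a crashing test is reported as a failure
+// instead of taking down the test runner; e.g. (illustrative test only):
+//   void null_deref_test(void **state) {
+//       int *p = NULL;
+//       *p = 1;    // SIGSEGV -> exception_handler() -> exit_test()
+//   }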
+typedef void (*SignalFunction)(int signal); +static SignalFunction default_signal_functions[ + ARRAY_LENGTH(exception_signals)]; + +#else // _WIN32 + +// The default exception filter. +static LPTOP_LEVEL_EXCEPTION_FILTER previous_exception_filter; + +// Fatal exceptions. +typedef struct ExceptionCodeInfo { + DWORD code; + const char* description; +} ExceptionCodeInfo; + +#define EXCEPTION_CODE_INFO(exception_code) {exception_code, #exception_code} + +static const ExceptionCodeInfo exception_codes[] = { + EXCEPTION_CODE_INFO(EXCEPTION_ACCESS_VIOLATION), + EXCEPTION_CODE_INFO(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), + EXCEPTION_CODE_INFO(EXCEPTION_DATATYPE_MISALIGNMENT), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_DENORMAL_OPERAND), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_DIVIDE_BY_ZERO), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_INEXACT_RESULT), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_INVALID_OPERATION), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_OVERFLOW), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_STACK_CHECK), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_UNDERFLOW), + EXCEPTION_CODE_INFO(EXCEPTION_GUARD_PAGE), + EXCEPTION_CODE_INFO(EXCEPTION_ILLEGAL_INSTRUCTION), + EXCEPTION_CODE_INFO(EXCEPTION_INT_DIVIDE_BY_ZERO), + EXCEPTION_CODE_INFO(EXCEPTION_INT_OVERFLOW), + EXCEPTION_CODE_INFO(EXCEPTION_INVALID_DISPOSITION), + EXCEPTION_CODE_INFO(EXCEPTION_INVALID_HANDLE), + EXCEPTION_CODE_INFO(EXCEPTION_IN_PAGE_ERROR), + EXCEPTION_CODE_INFO(EXCEPTION_NONCONTINUABLE_EXCEPTION), + EXCEPTION_CODE_INFO(EXCEPTION_PRIV_INSTRUCTION), + EXCEPTION_CODE_INFO(EXCEPTION_STACK_OVERFLOW), +}; +#endif // !_WIN32 + + +// Exit the currently executing test. +static void exit_test(const int quit_application) { + if (global_running_test) { + longjmp(global_run_test_env, 1); + } else if (quit_application) { + exit(-1); + } +} + + +// Initialize a SourceLocation structure. +static void initialize_source_location(SourceLocation * const location) { + assert_true(location); + location->file = NULL; + location->line = 0; +} + + +// Determine whether a source location is currently set. +static int source_location_is_set(const SourceLocation * const location) { + assert_true(location); + return location->file && location->line; +} + + +// Set a source location. +static void set_source_location( + SourceLocation * const location, const char * const file, + const int line) { + assert_true(location); + location->file = file; + location->line = line; +} + + +// Create function results and expected parameter lists. 
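+// Rough per-test lifecycle, as driven by _run_test() further below:
+//   initialize_testing(name);         // reset result/parameter maps
+//   test_function(&state);            // test body queues/consumes mock values
+//   fail_if_leftover_values(name);    // unconsumed values fail the test
+//   teardown_testing(name);           // free any remaining map entries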
+void initialize_testing(const char *test_name) { + list_initialize(&global_function_result_map_head); + initialize_source_location(&global_last_mock_value_location); + list_initialize(&global_function_parameter_map_head); + initialize_source_location(&global_last_parameter_location); +} + + +static void fail_if_leftover_values(const char *test_name) { + int error_occurred = 0; + remove_always_return_values(&global_function_result_map_head, 1); + if (check_for_leftover_values( + &global_function_result_map_head, + "%s() has remaining non-returned values.\n", 1)) { + error_occurred = 1; + } + + remove_always_return_values(&global_function_parameter_map_head, 2); + if (check_for_leftover_values( + &global_function_parameter_map_head, + "%s parameter still has values that haven't been checked.\n", 2)) { + error_occurred = 1; + } + if (error_occurred) { + exit_test(1); + } +} + + +void teardown_testing(const char *test_name) { + list_free(&global_function_result_map_head, free_symbol_map_value, + (void*)0); + initialize_source_location(&global_last_mock_value_location); + list_free(&global_function_parameter_map_head, free_symbol_map_value, + (void*)1); + initialize_source_location(&global_last_parameter_location); +} + +// Initialize a list node. +static ListNode* list_initialize(ListNode * const node) { + node->value = NULL; + node->next = node; + node->prev = node; + node->refcount = 1; + return node; +} + + +/* Adds a value at the tail of a given list. + * The node referencing the value is allocated from the heap. */ +static ListNode* list_add_value(ListNode * const head, const void *value, + const int refcount) { + ListNode * const new_node = (ListNode*)malloc(sizeof(ListNode)); + assert_true(head); + assert_true(value); + new_node->value = value; + new_node->refcount = refcount; + return list_add(head, new_node); +} + + +// Add new_node to the end of the list. +static ListNode* list_add(ListNode * const head, ListNode *new_node) { + assert_true(head); + assert_true(new_node); + new_node->next = head; + new_node->prev = head->prev; + head->prev->next = new_node; + head->prev = new_node; + return new_node; +} + + +// Remove a node from a list. +static ListNode* list_remove( + ListNode * const node, const CleanupListValue cleanup_value, + void * const cleanup_value_data) { + assert_true(node); + node->prev->next = node->next; + node->next->prev = node->prev; + if (cleanup_value) { + cleanup_value(node->value, cleanup_value_data); + } + return node; +} + + +/* Remove a list node from a list and free the node. */ +static void list_remove_free( + ListNode * const node, const CleanupListValue cleanup_value, + void * const cleanup_value_data) { + assert_true(node); + free(list_remove(node, cleanup_value, cleanup_value_data)); +} + + +/* Frees memory kept by a linked list + * The cleanup_value function is called for every "value" field of nodes in the + * list, except for the head. In addition to each list value, + * cleanup_value_data is passed to each call to cleanup_value. The head + * of the list is not deallocated. + */ +static ListNode* list_free( + ListNode * const head, const CleanupListValue cleanup_value, + void * const cleanup_value_data) { + assert_true(head); + while (!list_empty(head)) { + list_remove_free(head->next, cleanup_value, cleanup_value_data); + } + return head; +} + + +// Determine whether a list is empty. 
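+// The lists above are circular and doubly linked with a sentinel head, so
+// an empty list is one whose head points back at itself:
+//   ListNode head;
+//   list_initialize(&head);           // head.next == head.prev == &head
+//   assert_true(list_empty(&head));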
+static int list_empty(const ListNode * const head) { + assert_true(head); + return head->next == head; +} + + +/* Find a value in the list using the equal_func to compare each node with the + * value. + */ +static int list_find(ListNode * const head, const void *value, + const EqualityFunction equal_func, ListNode **output) { + ListNode *current; + assert_true(head); + for (current = head->next; current != head; current = current->next) { + if (equal_func(current->value, value)) { + *output = current; + return 1; + } + } + return 0; +} + +// Returns the first node of a list +static int list_first(ListNode * const head, ListNode **output) { + ListNode *target_node; + assert_true(head); + if (list_empty(head)) { + return 0; + } + target_node = head->next; + *output = target_node; + return 1; +} + + +// Deallocate a value referenced by a list. +static void free_value(const void *value, void *cleanup_value_data) { + assert_true(value); + free((void*)value); +} + + +// Releases memory associated to a symbol_map_value. +static void free_symbol_map_value(const void *value, + void *cleanup_value_data) { + SymbolMapValue * const map_value = (SymbolMapValue*)value; + assert_true(value); + list_free(&map_value->symbol_values_list_head, + cleanup_value_data ? free_symbol_map_value : free_value, + (void *)((char *) cleanup_value_data - 1)); + free(map_value); +} + + +/* Determine whether a symbol name referenced by a symbol_map_value + * matches the specified function name. */ +static int symbol_names_match(const void *map_value, const void *symbol) { + return !strcmp(((SymbolMapValue*)map_value)->symbol_name, + (const char*)symbol); +} + + +/* Adds a value to the queue of values associated with the given + * hierarchy of symbols. It's assumed value is allocated from the heap. + */ +static void add_symbol_value(ListNode * const symbol_map_head, + const char * const symbol_names[], + const size_t number_of_symbol_names, + const void* value, const int refcount) { + const char* symbol_name; + ListNode *target_node; + SymbolMapValue *target_map_value; + assert_true(symbol_map_head); + assert_true(symbol_names); + assert_true(number_of_symbol_names); + symbol_name = symbol_names[0]; + + if (!list_find(symbol_map_head, symbol_name, symbol_names_match, + &target_node)) { + SymbolMapValue * const new_symbol_map_value = + malloc(sizeof(*new_symbol_map_value)); + new_symbol_map_value->symbol_name = symbol_name; + list_initialize(&new_symbol_map_value->symbol_values_list_head); + target_node = list_add_value(symbol_map_head, new_symbol_map_value, + 1); + } + + target_map_value = (SymbolMapValue*)target_node->value; + if (number_of_symbol_names == 1) { + list_add_value(&target_map_value->symbol_values_list_head, + value, refcount); + } else { + add_symbol_value(&target_map_value->symbol_values_list_head, + &symbol_names[1], number_of_symbol_names - 1, value, + refcount); + } +} + + +/* Gets the next value associated with the given hierarchy of symbols. + * The value is returned as an output parameter with the function returning the + * node's old refcount value if a value is found, 0 otherwise. + * This means that a return value of 1 indicates the node was just removed from + * the list. 
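+ * For instance, after will_return_count(fn, 42, 2) -- fn being any mocked
+ * function -- the first lookup returns 2 and leaves the value queued, while
+ * the second returns 1 and frees the node.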
+ */ +static int get_symbol_value( + ListNode * const head, const char * const symbol_names[], + const size_t number_of_symbol_names, void **output) { + const char* symbol_name; + ListNode *target_node; + assert_true(head); + assert_true(symbol_names); + assert_true(number_of_symbol_names); + assert_true(output); + symbol_name = symbol_names[0]; + + if (list_find(head, symbol_name, symbol_names_match, &target_node)) { + SymbolMapValue *map_value; + ListNode *child_list; + int return_value = 0; + assert_true(target_node); + assert_true(target_node->value); + + map_value = (SymbolMapValue*)target_node->value; + child_list = &map_value->symbol_values_list_head; + + if (number_of_symbol_names == 1) { + ListNode *value_node = NULL; + return_value = list_first(child_list, &value_node); + assert_true(return_value); + *output = (void*) value_node->value; + return_value = value_node->refcount; + if (--value_node->refcount == 0) { + list_remove_free(value_node, NULL, NULL); + } + } else { + return_value = get_symbol_value( + child_list, &symbol_names[1], number_of_symbol_names - 1, + output); + } + if (list_empty(child_list)) { + list_remove_free(target_node, free_symbol_map_value, (void*)0); + } + return return_value; + } else { + print_error("No entries for symbol %s.\n", symbol_name); + } + return 0; +} + + +/* Traverse down a tree of symbol values and remove the first symbol value + * in each branch that has a refcount < -1 (i.e should always be returned + * and has been returned at least once). + */ +static void remove_always_return_values(ListNode * const map_head, + const size_t number_of_symbol_names) { + ListNode *current; + assert_true(map_head); + assert_true(number_of_symbol_names); + current = map_head->next; + while (current != map_head) { + SymbolMapValue * const value = (SymbolMapValue*)current->value; + ListNode * const next = current->next; + ListNode *child_list; + assert_true(value); + child_list = &value->symbol_values_list_head; + + if (!list_empty(child_list)) { + if (number_of_symbol_names == 1) { + ListNode * const child_node = child_list->next; + // If this item has been returned more than once, free it. + if (child_node->refcount < -1) { + list_remove_free(child_node, free_value, NULL); + } + } else { + remove_always_return_values(child_list, + number_of_symbol_names - 1); + } + } + + if (list_empty(child_list)) { + list_remove_free(current, free_value, NULL); + } + current = next; + } +} + +/* Checks if there are any leftover values set up by the test that were never + * retrieved through execution, and fail the test if that is the case. 
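+ * E.g. a test that calls will_return(fn, 0) but never invokes the mocked
+ * fn() fails with "fn() has remaining non-returned values."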
+ */ +static int check_for_leftover_values( + const ListNode * const map_head, const char * const error_message, + const size_t number_of_symbol_names) { + const ListNode *current; + int symbols_with_leftover_values = 0; + assert_true(map_head); + assert_true(number_of_symbol_names); + + for (current = map_head->next; current != map_head; + current = current->next) { + const SymbolMapValue * const value = + (SymbolMapValue*)current->value; + const ListNode *child_list; + assert_true(value); + child_list = &value->symbol_values_list_head; + + if (!list_empty(child_list)) { + if (number_of_symbol_names == 1) { + const ListNode *child_node; + print_error(error_message, value->symbol_name); + print_error(" Remaining item(s) declared at...\n"); + + for (child_node = child_list->next; child_node != child_list; + child_node = child_node->next) { + const SourceLocation * const location = child_node->value; + print_error(" " SOURCE_LOCATION_FORMAT "\n", + location->file, location->line); + } + } else { + print_error("%s.", value->symbol_name); + check_for_leftover_values(child_list, error_message, + number_of_symbol_names - 1); + } + symbols_with_leftover_values ++; + } + } + return symbols_with_leftover_values; +} + + +// Get the next return value for the specified mock function. +LargestIntegralType _mock(const char * const function, const char* const file, + const int line) { + void *result; + const int rc = get_symbol_value(&global_function_result_map_head, + &function, 1, &result); + if (rc) { + SymbolValue * const symbol = (SymbolValue*)result; + const LargestIntegralType value = symbol->value; + global_last_mock_value_location = symbol->location; + if (rc == 1) { + free(symbol); + } + return value; + } else { + print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " + "to mock function %s\n", file, line, function); + if (source_location_is_set(&global_last_mock_value_location)) { + print_error("Previously returned mock value was declared at " + SOURCE_LOCATION_FORMAT "\n", + global_last_mock_value_location.file, + global_last_mock_value_location.line); + } else { + print_error("There were no previously returned mock values for " + "this test.\n"); + } + exit_test(1); + } + return 0; +} + + +// Add a return value for the specified mock function name. +void _will_return(const char * const function_name, const char * const file, + const int line, const LargestIntegralType value, + const int count) { + SymbolValue * const return_value = malloc(sizeof(*return_value)); + assert_true(count > 0 || count == -1); + return_value->value = value; + set_source_location(&return_value->location, file, line); + add_symbol_value(&global_function_result_map_head, &function_name, 1, + return_value, count); +} + + +/* Add a custom parameter checking function. If the event parameter is NULL + * the event structure is allocated internally by this function. If event + * parameter is provided it must be allocated on the heap and doesn't need to + * be deallocated by the caller. + */ +void _expect_check( + const char* const function, const char* const parameter, + const char* const file, const int line, + const CheckParameterValue check_function, + const LargestIntegralType check_data, + CheckParameterEvent * const event, const int count) { + CheckParameterEvent * const check = + event ? 
event : malloc(sizeof(*check)); + const char* symbols[] = {function, parameter}; + check->parameter_name = parameter; + check->check_value = check_function; + check->check_value_data = check_data; + set_source_location(&check->location, file, line); + add_symbol_value(&global_function_parameter_map_head, symbols, 2, check, + count); +} + + +/* Returns 1 if the specified values are equal. If the values are not equal + * an error is displayed and 0 is returned. */ +static int values_equal_display_error(const LargestIntegralType left, + const LargestIntegralType right) { + const int equal = left == right; + if (!equal) { + print_error(LargestIntegralTypePrintfFormat " != " + LargestIntegralTypePrintfFormat "\n", left, right); + } + return equal; +} + +/* Returns 1 if the specified values are not equal. If the values are equal + * an error is displayed and 0 is returned. */ +static int values_not_equal_display_error(const LargestIntegralType left, + const LargestIntegralType right) { + const int not_equal = left != right; + if (!not_equal) { + print_error(LargestIntegralTypePrintfFormat " == " + LargestIntegralTypePrintfFormat "\n", left, right); + } + return not_equal; +} + + +/* Determine whether value is contained within check_integer_set. + * If invert is 0 and the value is in the set 1 is returned, otherwise 0 is + * returned and an error is displayed. If invert is 1 and the value is not + * in the set 1 is returned, otherwise 0 is returned and an error is + * displayed. */ +static int value_in_set_display_error( + const LargestIntegralType value, + const CheckIntegerSet * const check_integer_set, const int invert) { + int succeeded = invert; + assert_true(check_integer_set); + { + const LargestIntegralType * const set = check_integer_set->set; + const size_t size_of_set = check_integer_set->size_of_set; + size_t i; + for (i = 0; i < size_of_set; i++) { + if (set[i] == value) { + // If invert = 0 and item is found, succeeded = 1. + // If invert = 1 and item is found, succeeded = 0. + succeeded = !succeeded; + break; + } + } + if (succeeded) { + return 1; + } + print_error("%d is %sin the set (", value, invert ? "" : "not "); + for (i = 0; i < size_of_set; i++) { + print_error("%d, ", set[i]); + } + print_error(")\n"); + } + return 0; +} + + +/* Determine whether a value is within the specified range. If the value is + * within the specified range 1 is returned. If the value isn't within the + * specified range an error is displayed and 0 is returned. */ +static int integer_in_range_display_error( + const LargestIntegralType value, const LargestIntegralType range_min, + const LargestIntegralType range_max) { + if (value >= range_min && value <= range_max) { + return 1; + } + print_error("%d is not within the range %d-%d\n", value, range_min, + range_max); + return 0; +} + + +/* Determine whether a value is within the specified range. If the value + * is not within the range 1 is returned. If the value is within the + * specified range an error is displayed and zero is returned. */ +static int integer_not_in_range_display_error( + const LargestIntegralType value, const LargestIntegralType range_min, + const LargestIntegralType range_max) { + if (value < range_min || value > range_max) { + return 1; + } + print_error("%d is within the range %d-%d\n", value, range_min, + range_max); + return 0; +} + + +/* Determine whether the specified strings are equal. If the strings are equal + * 1 is returned. If they're not equal an error is displayed and 0 is + * returned. 
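+ * E.g. comparing "abc" with "abd" prints
+ *   "abc" != "abd"
+ * and returns 0.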
*/ +static int string_equal_display_error( + const char * const left, const char * const right) { + if (strcmp(left, right) == 0) { + return 1; + } + print_error("\"%s\" != \"%s\"\n", left, right); + return 0; +} + + +/* Determine whether the specified strings are equal. If the strings are not + * equal 1 is returned. If they're not equal an error is displayed and 0 is + * returned */ +static int string_not_equal_display_error( + const char * const left, const char * const right) { + if (strcmp(left, right) != 0) { + return 1; + } + print_error("\"%s\" == \"%s\"\n", left, right); + return 0; +} + + +/* Determine whether the specified areas of memory are equal. If they're equal + * 1 is returned otherwise an error is displayed and 0 is returned. */ +static int memory_equal_display_error(const char* const a, const char* const b, + const size_t size) { + int differences = 0; + size_t i; + for (i = 0; i < size; i++) { + const char l = a[i]; + const char r = b[i]; + if (l != r) { + print_error("difference at offset %d 0x%02x 0x%02x\n", i, l, r); + differences ++; + } + } + if (differences) { + print_error("%d bytes of 0x%08x and 0x%08x differ\n", differences, + a, b); + return 0; + } + return 1; +} + + +/* Determine whether the specified areas of memory are not equal. If they're + * not equal 1 is returned otherwise an error is displayed and 0 is + * returned. */ +static int memory_not_equal_display_error( + const char* const a, const char* const b, const size_t size) { + int same = 0; + size_t i; + for (i = 0; i < size; i++) { + const char l = a[i]; + const char r = b[i]; + if (l == r) { + same ++; + } + } + if (same == size) { + print_error("%d bytes of 0x%08x and 0x%08x the same\n", same, + a, b); + return 0; + } + return 1; +} + + +// CheckParameterValue callback to check whether a value is within a set. +static int check_in_set(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return value_in_set_display_error(value, + cast_largest_integral_type_to_pointer(CheckIntegerSet*, + check_value_data), 0); +} + + +// CheckParameterValue callback to check whether a value isn't within a set. +static int check_not_in_set(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return value_in_set_display_error(value, + cast_largest_integral_type_to_pointer(CheckIntegerSet*, + check_value_data), 1); +} + + +/* Create the callback data for check_in_set() or check_not_in_set() and + * register a check event. */ +static void expect_set( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType values[], const size_t number_of_values, + const CheckParameterValue check_function, const int count) { + CheckIntegerSet * const check_integer_set = + malloc(sizeof(*check_integer_set) + + (sizeof(values[0]) * number_of_values)); + LargestIntegralType * const set = (LargestIntegralType*)( + check_integer_set + 1); + declare_initialize_value_pointer_pointer(check_data, check_integer_set); + assert_true(values); + assert_true(number_of_values); + memcpy(set, values, number_of_values * sizeof(values[0])); + check_integer_set->set = set; + _expect_check( + function, parameter, file, line, check_function, + check_data.value, &check_integer_set->event, count); +} + + +// Add an event to check whether a value is in a set. 
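+// Typical usage from a test (the mocked function fn and its parameter mode
+// are placeholder names):
+//   static const LargestIntegralType valid_modes[] = {1, 2, 4};
+//   expect_in_set(fn, mode, valid_modes);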
+void _expect_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType values[], const size_t number_of_values, + const int count) { + expect_set(function, parameter, file, line, values, number_of_values, + check_in_set, count); +} + + +// Add an event to check whether a value isn't in a set. +void _expect_not_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType values[], const size_t number_of_values, + const int count) { + expect_set(function, parameter, file, line, values, number_of_values, + check_not_in_set, count); +} + + +// CheckParameterValue callback to check whether a value is within a range. +static int check_in_range(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckIntegerRange * const check_integer_range = + cast_largest_integral_type_to_pointer(CheckIntegerRange*, + check_value_data); + assert_true(check_integer_range); + return integer_in_range_display_error(value, check_integer_range->minimum, + check_integer_range->maximum); +} + + +// CheckParameterValue callback to check whether a value is not within a range. +static int check_not_in_range(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckIntegerRange * const check_integer_range = + cast_largest_integral_type_to_pointer(CheckIntegerRange*, + check_value_data); + assert_true(check_integer_range); + return integer_not_in_range_display_error( + value, check_integer_range->minimum, check_integer_range->maximum); +} + + +/* Create the callback data for check_in_range() or check_not_in_range() and + * register a check event. */ +static void expect_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, const LargestIntegralType maximum, + const CheckParameterValue check_function, const int count) { + CheckIntegerRange * const check_integer_range = + malloc(sizeof(*check_integer_range)); + declare_initialize_value_pointer_pointer(check_data, check_integer_range); + check_integer_range->minimum = minimum; + check_integer_range->maximum = maximum; + _expect_check(function, parameter, file, line, check_function, + check_data.value, &check_integer_range->event, count); +} + + +// Add an event to determine whether a parameter is within a range. +void _expect_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, const LargestIntegralType maximum, + const int count) { + expect_range(function, parameter, file, line, minimum, maximum, + check_in_range, count); +} + + +// Add an event to determine whether a parameter is not within a range. +void _expect_not_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, const LargestIntegralType maximum, + const int count) { + expect_range(function, parameter, file, line, minimum, maximum, + check_not_in_range, count); +} + + +/* CheckParameterValue callback to check whether a value is equal to an + * expected value. */ +static int check_value(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return values_equal_display_error(value, check_value_data); +} + + +// Add an event to check a parameter equals an expected value. 
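+// Paired with check_expected() in the mocked function (placeholder names):
+//   expect_value(fn, flags, 0x3);                    // in the test
+//   void fn(int flags) { check_expected(flags); }    // in the mock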
+void _expect_value( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType value, const int count) { + _expect_check(function, parameter, file, line, check_value, value, NULL, + count); +} + + +/* CheckParameterValue callback to check whether a value is not equal to an + * expected value. */ +static int check_not_value(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return values_not_equal_display_error(value, check_value_data); +} + + +// Add an event to check a parameter is not equal to an expected value. +void _expect_not_value( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType value, const int count) { + _expect_check(function, parameter, file, line, check_not_value, value, + NULL, count); +} + + +// CheckParameterValue callback to check whether a parameter equals a string. +static int check_string(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return string_equal_display_error( + cast_largest_integral_type_to_pointer(char*, value), + cast_largest_integral_type_to_pointer(char*, check_value_data)); +} + + +// Add an event to check whether a parameter is equal to a string. +void _expect_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count) { + declare_initialize_value_pointer_pointer(string_pointer, (char*)string); + _expect_check(function, parameter, file, line, check_string, + string_pointer.value, NULL, count); +} + + +/* CheckParameterValue callback to check whether a parameter is not equals to + * a string. */ +static int check_not_string(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return string_not_equal_display_error( + cast_largest_integral_type_to_pointer(char*, value), + cast_largest_integral_type_to_pointer(char*, check_value_data)); +} + + +// Add an event to check whether a parameter is not equal to a string. +void _expect_not_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count) { + declare_initialize_value_pointer_pointer(string_pointer, (char*)string); + _expect_check(function, parameter, file, line, check_not_string, + string_pointer.value, NULL, count); +} + +/* CheckParameterValue callback to check whether a parameter equals an area of + * memory. */ +static int check_memory(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckMemoryData * const check = cast_largest_integral_type_to_pointer( + CheckMemoryData*, check_value_data); + assert_true(check); + return memory_equal_display_error( + cast_largest_integral_type_to_pointer(void*, value), + check->memory, check->size); +} + + +/* Create the callback data for check_memory() or check_not_memory() and + * register a check event. 
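+ * The expected bytes are memcpy()d into the same allocation as the event
+ * (the region at check_data + 1), so the caller's buffer does not need to
+ * outlive the expect_memory()/expect_not_memory() call.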
*/ +static void expect_memory_setup( + const char* const function, const char* const parameter, + const char* const file, const int line, + const void * const memory, const size_t size, + const CheckParameterValue check_function, const int count) { + CheckMemoryData * const check_data = malloc(sizeof(*check_data) + size); + void * const mem = (void*)(check_data + 1); + declare_initialize_value_pointer_pointer(check_data_pointer, check_data); + assert_true(memory); + assert_true(size); + memcpy(mem, memory, size); + check_data->memory = mem; + check_data->size = size; + _expect_check(function, parameter, file, line, check_function, + check_data_pointer.value, &check_data->event, count); +} + + +// Add an event to check whether a parameter matches an area of memory. +void _expect_memory( + const char* const function, const char* const parameter, + const char* const file, const int line, const void* const memory, + const size_t size, const int count) { + expect_memory_setup(function, parameter, file, line, memory, size, + check_memory, count); +} + + +/* CheckParameterValue callback to check whether a parameter is not equal to + * an area of memory. */ +static int check_not_memory(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckMemoryData * const check = cast_largest_integral_type_to_pointer( + CheckMemoryData*, check_value_data); + assert_true(check); + return memory_not_equal_display_error( + cast_largest_integral_type_to_pointer(void*, value), check->memory, + check->size); +} + + +// Add an event to check whether a parameter doesn't match an area of memory. +void _expect_not_memory( + const char* const function, const char* const parameter, + const char* const file, const int line, const void* const memory, + const size_t size, const int count) { + expect_memory_setup(function, parameter, file, line, memory, size, + check_not_memory, count); +} + + +// CheckParameterValue callback that always returns 1. +static int check_any(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return 1; +} + + +// Add an event to allow any value for a parameter. 
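+// Useful when only the other parameters of a call matter (placeholder names):
+//   expect_any(fn, context);
+//   expect_value(fn, id, 7);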
+void _expect_any( + const char* const function, const char* const parameter, + const char* const file, const int line, const int count) { + _expect_check(function, parameter, file, line, check_any, 0, NULL, + count); +} + + +void _check_expected( + const char * const function_name, const char * const parameter_name, + const char* file, const int line, const LargestIntegralType value) { + void *result; + const char* symbols[] = {function_name, parameter_name}; + const int rc = get_symbol_value(&global_function_parameter_map_head, + symbols, 2, &result); + if (rc) { + CheckParameterEvent * const check = (CheckParameterEvent*)result; + int check_succeeded; + global_last_parameter_location = check->location; + check_succeeded = check->check_value(value, check->check_value_data); + if (rc == 1) { + free(check); + } + if (!check_succeeded) { + print_error("ERROR: Check of parameter %s, function %s failed\n" + "Expected parameter declared at " + SOURCE_LOCATION_FORMAT "\n", + parameter_name, function_name, + global_last_parameter_location.file, + global_last_parameter_location.line); + _fail(file, line); + } + } else { + print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " + "to check parameter %s of function %s\n", file, line, + parameter_name, function_name); + if (source_location_is_set(&global_last_parameter_location)) { + print_error("Previously declared parameter value was declared at " + SOURCE_LOCATION_FORMAT "\n", + global_last_parameter_location.file, + global_last_parameter_location.line); + } else { + print_error("There were no previously declared parameter values " + "for this test.\n"); + } + exit_test(1); + } +} + + + +/* Replacement for assert. */ +void mock_assert(const int result, const char* const expression, + const char* const file, const int line) { + if (!result) { + if (global_expecting_assert) { + global_last_failed_assert = expression; + longjmp(global_expect_assert_env, result); + } else { + print_error("ASSERT: %s\n", expression); + _fail(file, line); + } + } +} + + +void _assert_true(const LargestIntegralType result, + const char * const expression, + const char * const file, const int line) { + if (!result) { + print_error("%s\n", expression); + _fail(file, line); + } +} + +void _assert_int_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line) { + if (!values_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_int_not_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line) { + if (!values_not_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_string_equal(const char * const a, const char * const b, + const char * const file, const int line) { + if (!string_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_string_not_equal(const char * const a, const char * const b, + const char *file, const int line) { + if (!string_not_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_memory_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line) { + if (!memory_equal_display_error((const char*)a, (const char*)b, size)) { + _fail(file, line); + } +} + + +void _assert_memory_not_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line) { + if (!memory_not_equal_display_error((const char*)a, (const char*)b, + size)) { + _fail(file, line); + } +} + + 
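+// The assert_* macros in cmockery.h expand to the functions below; e.g.
+// assert_in_range(n, 1, 10) becomes
+//   _assert_in_range(cast_to_largest_integral_type(n),
+//                    cast_to_largest_integral_type(1),
+//                    cast_to_largest_integral_type(10), __FILE__, __LINE__);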
+void _assert_in_range( + const LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const file, + const int line) { + if (!integer_in_range_display_error(value, minimum, maximum)) { + _fail(file, line); + } +} + +void _assert_not_in_range( + const LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const file, + const int line) { + if (!integer_not_in_range_display_error(value, minimum, maximum)) { + _fail(file, line); + } +} + +void _assert_in_set(const LargestIntegralType value, + const LargestIntegralType values[], + const size_t number_of_values, const char* const file, + const int line) { + CheckIntegerSet check_integer_set; + check_integer_set.set = values; + check_integer_set.size_of_set = number_of_values; + if (!value_in_set_display_error(value, &check_integer_set, 0)) { + _fail(file, line); + } +} + +void _assert_not_in_set(const LargestIntegralType value, + const LargestIntegralType values[], + const size_t number_of_values, const char* const file, + const int line) { + CheckIntegerSet check_integer_set; + check_integer_set.set = values; + check_integer_set.size_of_set = number_of_values; + if (!value_in_set_display_error(value, &check_integer_set, 1)) { + _fail(file, line); + } +} + + +// Get the list of allocated blocks. +static ListNode* get_allocated_blocks_list() { + // If it initialized, initialize the list of allocated blocks. + if (!global_allocated_blocks.value) { + list_initialize(&global_allocated_blocks); + global_allocated_blocks.value = (void*)1; + } + return &global_allocated_blocks; +} + +// Use the real malloc in this function. +#undef malloc +void* _test_malloc(const size_t size, const char* file, const int line) { + char* ptr; + MallocBlockInfo *block_info; + ListNode * const block_list = get_allocated_blocks_list(); + const size_t allocate_size = size + (MALLOC_GUARD_SIZE * 2) + + sizeof(*block_info) + MALLOC_ALIGNMENT; + char* const block = (char*)malloc(allocate_size); + assert_true(block); + + // Calculate the returned address. + ptr = (char*)(((size_t)block + MALLOC_GUARD_SIZE + sizeof(*block_info) + + MALLOC_ALIGNMENT) & ~(MALLOC_ALIGNMENT - 1)); + + // Initialize the guard blocks. + memset(ptr - MALLOC_GUARD_SIZE, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); + memset(ptr + size, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); + memset(ptr, MALLOC_ALLOC_PATTERN, size); + + block_info = (MallocBlockInfo*)(ptr - (MALLOC_GUARD_SIZE + + sizeof(*block_info))); + set_source_location(&block_info->location, file, line); + block_info->allocated_size = allocate_size; + block_info->size = size; + block_info->block = block; + block_info->node.value = block_info; + list_add(block_list, &block_info->node); + return ptr; +} +#define malloc test_malloc + + +void* _test_calloc(const size_t number_of_elements, const size_t size, + const char* file, const int line) { + void* const ptr = _test_malloc(number_of_elements * size, file, line); + if (ptr) { + memset(ptr, 0, number_of_elements * size); + } + return ptr; +} + + +// Use the real free in this function. +#undef free +void _test_free(void* const ptr, const char* file, const int line) { + unsigned int i; + char *block = (char*)ptr; + MallocBlockInfo *block_info; + _assert_true((LargestIntegralType)ptr, "ptr", file, line); + block_info = (MallocBlockInfo*)(block - (MALLOC_GUARD_SIZE + + sizeof(*block_info))); + // Check the guard blocks. 
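+    // Every guard byte should still be MALLOC_GUARD_PATTERN (0xEF); e.g. a
+    // one-byte heap overflow such as
+    //   char *p = test_malloc(4); p[4] = 0;
+    // corrupts the first trailing guard byte and is reported here.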
+ { + char *guards[2] = {block - MALLOC_GUARD_SIZE, + block + block_info->size}; + for (i = 0; i < ARRAY_LENGTH(guards); i++) { + unsigned int j; + char * const guard = guards[i]; + for (j = 0; j < MALLOC_GUARD_SIZE; j++) { + const char diff = guard[j] - MALLOC_GUARD_PATTERN; + if (diff) { + print_error( + "Guard block of 0x%08x size=%d allocated by " + SOURCE_LOCATION_FORMAT " at 0x%08x is corrupt\n", + (size_t)ptr, block_info->size, + block_info->location.file, block_info->location.line, + (size_t)&guard[j]); + _fail(file, line); + } + } + } + } + list_remove(&block_info->node, NULL, NULL); + + block = block_info->block; + memset(block, MALLOC_FREE_PATTERN, block_info->allocated_size); + free(block); +} +#define free test_free + + +// Crudely checkpoint the current heap state. +static const ListNode* check_point_allocated_blocks() { + return get_allocated_blocks_list()->prev; +} + + +/* Display the blocks allocated after the specified check point. This + * function returns the number of blocks displayed. */ +static int display_allocated_blocks(const ListNode * const check_point) { + const ListNode * const head = get_allocated_blocks_list(); + const ListNode *node; + int allocated_blocks = 0; + assert_true(check_point); + assert_true(check_point->next); + + for (node = check_point->next; node != head; node = node->next) { + const MallocBlockInfo * const block_info = node->value; + assert_true(block_info); + + if (!allocated_blocks) { + print_error("Blocks allocated...\n"); + } + print_error(" 0x%08x : " SOURCE_LOCATION_FORMAT "\n", + block_info->block, block_info->location.file, + block_info->location.line); + allocated_blocks ++; + } + return allocated_blocks; +} + + +// Free all blocks allocated after the specified check point. +static void free_allocated_blocks(const ListNode * const check_point) { + const ListNode * const head = get_allocated_blocks_list(); + const ListNode *node; + assert_true(check_point); + + node = check_point->next; + assert_true(node); + + while (node != head) { + MallocBlockInfo * const block_info = (MallocBlockInfo*)node->value; + node = node->next; + free((char*)block_info + sizeof(*block_info) + MALLOC_GUARD_SIZE); + } +} + + +// Fail if any any blocks are allocated after the specified check point. +static void fail_if_blocks_allocated(const ListNode * const check_point, + const char * const test_name) { + const int allocated_blocks = display_allocated_blocks(check_point); + if (allocated_blocks) { + free_allocated_blocks(check_point); + print_error("ERROR: %s leaked %d block(s)\n", test_name, + allocated_blocks); + exit_test(1); + } +} + + +void _fail(const char * const file, const int line) { + print_error("ERROR: " SOURCE_LOCATION_FORMAT " Failure!\n", file, line); + exit_test(1); +} + + +#ifndef _WIN32 +static void exception_handler(int sig) { + print_error("%s\n", strsignal(sig)); + exit_test(1); +} + +#else // _WIN32 + +static LONG WINAPI exception_filter(EXCEPTION_POINTERS *exception_pointers) { + EXCEPTION_RECORD * const exception_record = + exception_pointers->ExceptionRecord; + const DWORD code = exception_record->ExceptionCode; + unsigned int i; + for (i = 0; i < ARRAY_LENGTH(exception_codes); i++) { + const ExceptionCodeInfo * const code_info = &exception_codes[i]; + if (code == code_info->code) { + static int shown_debug_message = 0; + fflush(stdout); + print_error("%s occurred at 0x%08x.\n", code_info->description, + exception_record->ExceptionAddress); + if (!shown_debug_message) { + print_error( + "\n" + "To debug in Visual Studio...\n" + "1. 
Select menu item File->Open Project\n"
+                    "2. Change 'Files of type' to 'Executable Files'\n"
+                    "3. Open this executable.\n"
+                    "4. Select menu item Debug->Start\n"
+                    "\n"
+                    "Alternatively, set the environment variable \n"
+                    "UNIT_TESTING_DEBUG to 1 and rebuild this executable, \n"
+                    "then click 'Debug' in the popup dialog box.\n"
+                    "\n");
+                shown_debug_message = 1;
+            }
+            exit_test(0);
+            return EXCEPTION_EXECUTE_HANDLER;
+        }
+    }
+    return EXCEPTION_CONTINUE_SEARCH;
+}
+#endif // !_WIN32
+
+
+// Standard output and error print methods.
+void vprint_message(const char* const format, va_list args) {
+    char buffer[1024];
+    vsnprintf(buffer, sizeof(buffer), format, args);
+    puts(buffer);
+#ifdef _WIN32
+    OutputDebugString(buffer);
+#endif // _WIN32
+}
+
+
+void vprint_error(const char* const format, va_list args) {
+    char buffer[1024];
+    vsnprintf(buffer, sizeof(buffer), format, args);
+    fputs(buffer, stderr);
+#ifdef _WIN32
+    OutputDebugString(buffer);
+#endif // _WIN32
+}
+
+
+void print_message(const char* const format, ...) {
+    va_list args;
+    va_start(args, format);
+    vprint_message(format, args);
+    va_end(args);
+}
+
+
+void print_error(const char* const format, ...) {
+    va_list args;
+    va_start(args, format);
+    vprint_error(format, args);
+    va_end(args);
+}
+
+
+int _run_test(
+    const char * const function_name, const UnitTestFunction Function,
+    void ** const state, const UnitTestFunctionType function_type,
+    const void* const heap_check_point) {
+    const ListNode * const check_point = heap_check_point ?
+        heap_check_point : check_point_allocated_blocks();
+    void *current_state = NULL;
+    int rc = 1;
+    int handle_exceptions = 1;
+#ifdef _WIN32
+    handle_exceptions = !IsDebuggerPresent();
+#endif // _WIN32
+#if UNIT_TESTING_DEBUG
+    handle_exceptions = 0;
+#endif // UNIT_TESTING_DEBUG
+
+    if (handle_exceptions) {
+#ifndef _WIN32
+        unsigned int i;
+        for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) {
+            default_signal_functions[i] = signal(
+                exception_signals[i], exception_handler);
+        }
+#else // _WIN32
+        previous_exception_filter = SetUnhandledExceptionFilter(
+            exception_filter);
+#endif // !_WIN32
+    }
+
+    if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) {
+        print_message("%s: Starting test\n", function_name);
+    }
+    initialize_testing(function_name);
+    global_running_test = 1;
+    if (setjmp(global_run_test_env) == 0) {
+        Function(state ? state : &current_state);
+        fail_if_leftover_values(function_name);
+
+        /* If this is a setup function then ignore any allocated blocks;
+         * only ensure they're deallocated on tear down. */
+        if (function_type != UNIT_TEST_FUNCTION_TYPE_SETUP) {
+            fail_if_blocks_allocated(check_point, function_name);
+        }
+
+        global_running_test = 0;
+
+        if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) {
+            print_message("%s: Test completed successfully.\n", function_name);
+        }
+        rc = 0;
+    } else {
+        global_running_test = 0;
+        print_message("%s: Test failed.\n", function_name);
+    }
+    teardown_testing(function_name);
+
+    if (handle_exceptions) {
+#ifndef _WIN32
+        unsigned int i;
+        for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) {
+            signal(exception_signals[i], default_signal_functions[i]);
+        }
+#else // _WIN32
+        if (previous_exception_filter) {
+            SetUnhandledExceptionFilter(previous_exception_filter);
+            previous_exception_filter = NULL;
+        }
+#endif // !_WIN32
+    }
+
+    return rc;
+}
+
+
+int _run_tests(const UnitTest * const tests, const size_t number_of_tests) {
+    // Whether to execute the next test.
+    int run_next_test = 1;
+    // Whether the previous test failed.
+    int previous_test_failed = 0;
+    // Check point of the heap state.
+    const ListNode * const check_point = check_point_allocated_blocks();
+    // Current test being executed.
+    size_t current_test = 0;
+    // Number of tests executed.
+    size_t tests_executed = 0;
+    // Number of failed tests.
+    size_t total_failed = 0;
+    // Number of setup functions.
+    size_t setups = 0;
+    // Number of teardown functions.
+    size_t teardowns = 0;
+    /* A stack of test states. A state is pushed on the stack
+     * when a test setup occurs and popped on tear down. */
+    TestState* test_states = malloc(number_of_tests * sizeof(*test_states));
+    size_t number_of_test_states = 0;
+    // Names of the tests that failed.
+    const char** failed_names = malloc(number_of_tests *
+                                       sizeof(*failed_names));
+    void **current_state = NULL;
+    // Make sure LargestIntegralType is at least the size of a pointer.
+    assert_true(sizeof(LargestIntegralType) >= sizeof(void*));
+
+    while (current_test < number_of_tests) {
+        const ListNode *test_check_point = NULL;
+        TestState *current_TestState;
+        const UnitTest * const test = &tests[current_test++];
+        if (!test->function) {
+            continue;
+        }
+
+        switch (test->function_type) {
+        case UNIT_TEST_FUNCTION_TYPE_TEST:
+            run_next_test = 1;
+            break;
+        case UNIT_TEST_FUNCTION_TYPE_SETUP: {
+            // Checkpoint the heap before the setup.
+            current_TestState = &test_states[number_of_test_states++];
+            current_TestState->check_point = check_point_allocated_blocks();
+            test_check_point = current_TestState->check_point;
+            current_state = &current_TestState->state;
+            *current_state = NULL;
+            run_next_test = 1;
+            setups ++;
+            break;
+        }
+        case UNIT_TEST_FUNCTION_TYPE_TEARDOWN:
+            // Check the heap based on the last setup checkpoint.
+            assert_true(number_of_test_states);
+            current_TestState = &test_states[--number_of_test_states];
+            test_check_point = current_TestState->check_point;
+            current_state = &current_TestState->state;
+            teardowns ++;
+            break;
+        default:
+            print_error("Invalid unit test function type %d\n",
+                        test->function_type);
+            exit_test(1);
+            break;
+        }
+
+        if (run_next_test) {
+            int failed = _run_test(test->name, test->function, current_state,
+                                   test->function_type, test_check_point);
+            if (failed) {
+                failed_names[total_failed] = test->name;
+            }
+
+            switch (test->function_type) {
+            case UNIT_TEST_FUNCTION_TYPE_TEST:
+                previous_test_failed = failed;
+                total_failed += failed;
+                tests_executed ++;
+                break;
+
+            case UNIT_TEST_FUNCTION_TYPE_SETUP:
+                if (failed) {
+                    total_failed ++;
+                    tests_executed ++;
+                    // Skip forward until the next test or setup function.
+                    run_next_test = 0;
+                }
+                previous_test_failed = 0;
+                break;
+
+            case UNIT_TEST_FUNCTION_TYPE_TEARDOWN:
+                // Count a teardown failure unless its test already failed.
+                if (failed && !previous_test_failed) {
+                    total_failed ++;
+                }
+                break;
+            default:
+                assert_false("BUG: shouldn't be here!");
+                break;
+            }
+        }
+    }
+
+    if (total_failed) {
+        size_t i;
+        print_error("%d out of %d tests failed!\n", (int)total_failed,
+                    (int)tests_executed);
+        for (i = 0; i < total_failed; i++) {
+            print_error("    %s\n", failed_names[i]);
+        }
+    } else {
+        print_message("All %d tests passed\n", (int)tests_executed);
+    }
+
+    if (number_of_test_states) {
+        print_error("Mismatched number of setup %d and teardown %d "
+                    "functions\n", (int)setups, (int)teardowns);
+        total_failed = -1;
+    }
+
+    free(test_states);
+    free((void*)failed_names);
+
+    fail_if_blocks_allocated(check_point, "run_tests");
+    return (int)total_failed;
+}
diff --git a/tests/cmocka/cmockery.h b/tests/cmocka/cmockery.h
new file mode 100755
index 00000000..4d5235cd
--- /dev/null
+++ b/tests/cmocka/cmockery.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CMOCKERY_H_
+#define CMOCKERY_H_
+/*
+ * These headers or their equivalents should be included prior to including
+ * this header file.
+ *
+ * #include <stdarg.h>
+ * #include <stddef.h>
+ * #include <setjmp.h>
+ *
+ * This allows test applications to use custom definitions of C standard
+ * library functions and types.
+ */
+
+// For those who are used to __func__ from gcc.
+#ifndef __func__
+#define __func__ __FUNCTION__
+#endif
+
+/* Largest integral type. This type should be large enough to hold any
+ * pointer or integer supported by the compiler. */
+#ifndef LargestIntegralType
+#define LargestIntegralType unsigned long long
+#endif // LargestIntegralType
+
+// Printf format used to display LargestIntegralType.
+#ifndef LargestIntegralTypePrintfFormat
+#ifdef _WIN32
+#define LargestIntegralTypePrintfFormat "%I64x"
+#else
+#define LargestIntegralTypePrintfFormat "%llx"
+#endif // _WIN32
+#endif // LargestIntegralTypePrintfFormat
+
+// Perform an unsigned cast to LargestIntegralType.
+#define cast_to_largest_integral_type(value) \
+    ((LargestIntegralType)(value))
+
+// Retrieves a return value for the current function.
+#define mock() _mock(__func__, __FILE__, __LINE__)
+
+/* Stores a value to be returned by the specified function later.
+ * The count parameter specifies the number of times the value should be
+ * returned by mock(). If count is set to -1 the value will always be returned.
+ */
+#define will_return(function, value) \
+    _will_return(#function, __FILE__, __LINE__, \
+                 cast_to_largest_integral_type(value), 1)
+#define will_return_count(function, value, count) \
+    _will_return(#function, __FILE__, __LINE__, \
+                 cast_to_largest_integral_type(value), count)
+
+/* Add a custom parameter checking function. If the event parameter is NULL
+ * the event structure is allocated internally by this function. If event
+ * parameter is provided it must be allocated on the heap and doesn't need to
+ * be deallocated by the caller.
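+ * A custom checker has the CheckParameterValue signature; e.g. (illustrative
+ * only, with a hypothetical mocked function fn and parameter n):
+ *   static int is_even(const LargestIntegralType value,
+ *                      const LargestIntegralType check_value_data) {
+ *       return (value % 2) == 0;
+ *   }
+ *   ...
+ *   expect_check(fn, n, is_even, 0);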
+ */ +#define expect_check(function, parameter, check_function, check_data) \ + _expect_check(#function, #parameter, __FILE__, __LINE__, check_function, \ + cast_to_largest_integral_type(check_data), NULL, 0) + +/* Add an event to check a parameter, using check_expected(), against a set of + * values. See will_return() for a description of the count parameter. + */ +#define expect_in_set(function, parameter, value_array) \ + expect_in_set_count(function, parameter, value_array, 1) +#define expect_in_set_count(function, parameter, value_array, count) \ + _expect_in_set(#function, #parameter, __FILE__, __LINE__, value_array, \ + sizeof(value_array) / sizeof((value_array)[0]), count) +#define expect_not_in_set(function, parameter, value_array) \ + expect_not_in_set_count(function, parameter, value_array, 1) +#define expect_not_in_set_count(function, parameter, value_array, count) \ + _expect_not_in_set( \ + #function, #parameter, __FILE__, __LINE__, value_array, \ + sizeof(value_array) / sizeof((value_array)[0]), count) + + +/* Add an event to check a parameter, using check_expected(), against a + * signed range. Where range is minimum <= value <= maximum. + * See will_return() for a description of the count parameter. + */ +#define expect_in_range(function, parameter, minimum, maximum) \ + expect_in_range_count(function, parameter, minimum, maximum, 1) +#define expect_in_range_count(function, parameter, minimum, maximum, count) \ + _expect_in_range(#function, #parameter, __FILE__, __LINE__, minimum, \ + maximum, count) + +/* Add an event to check a parameter, using check_expected(), against a + * signed range. Where range is value < minimum or value > maximum. + * See will_return() for a description of the count parameter. + */ +#define expect_not_in_range(function, parameter, minimum, maximum) \ + expect_not_in_range_count(function, parameter, minimum, maximum, 1) +#define expect_not_in_range_count(function, parameter, minimum, maximum, \ + count) \ + _expect_not_in_range(#function, #parameter, __FILE__, __LINE__, \ + minimum, maximum, count) + +/* Add an event to check whether a parameter, using check_expected(), is or + * isn't a value. See will_return() for a description of the count parameter. + */ +#define expect_value(function, parameter, value) \ + expect_value_count(function, parameter, value, 1) +#define expect_value_count(function, parameter, value, count) \ + _expect_value(#function, #parameter, __FILE__, __LINE__, \ + cast_to_largest_integral_type(value), count) +#define expect_not_value(function, parameter, value) \ + expect_not_value_count(function, parameter, value, 1) +#define expect_not_value_count(function, parameter, value, count) \ + _expect_not_value(#function, #parameter, __FILE__, __LINE__, \ + cast_to_largest_integral_type(value), count) + +/* Add an event to check whether a parameter, using check_expected(), + * is or isn't a string. See will_return() for a description of the count + * parameter. 
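+ * E.g. (placeholder names):
+ *   expect_string(fn, path, "/tmp/test");
+ *   expect_not_string(fn, user, "root");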
+ */ +#define expect_string(function, parameter, string) \ + expect_string_count(function, parameter, string, 1) +#define expect_string_count(function, parameter, string, count) \ + _expect_string(#function, #parameter, __FILE__, __LINE__, \ + (const char*)(string), count) +#define expect_not_string(function, parameter, string) \ + expect_not_string_count(function, parameter, string, 1) +#define expect_not_string_count(function, parameter, string, count) \ + _expect_not_string(#function, #parameter, __FILE__, __LINE__, \ + (const char*)(string), count) + +/* Add an event to check whether a parameter, using check_expected() does or + * doesn't match an area of memory. See will_return() for a description of + * the count parameter. + */ +#define expect_memory(function, parameter, memory, size) \ + expect_memory_count(function, parameter, memory, size, 1) +#define expect_memory_count(function, parameter, memory, size, count) \ + _expect_memory(#function, #parameter, __FILE__, __LINE__, \ + (const void*)(memory), size, count) +#define expect_not_memory(function, parameter, memory, size) \ + expect_not_memory_count(function, parameter, memory, size, 1) +#define expect_not_memory_count(function, parameter, memory, size, count) \ + _expect_not_memory(#function, #parameter, __FILE__, __LINE__, \ + (const void*)(memory), size, count) + + +/* Add an event to allow any value for a parameter checked using + * check_expected(). See will_return() for a description of the count + * parameter. + */ +#define expect_any(function, parameter) \ + expect_any_count(function, parameter, 1) +#define expect_any_count(function, parameter, count) \ + _expect_any(#function, #parameter, __FILE__, __LINE__, count) + +/* Determine whether a function parameter is correct. This ensures the next + * value queued by one of the expect_*() macros matches the specified variable. + */ +#define check_expected(parameter) \ + _check_expected(__func__, #parameter, __FILE__, __LINE__, \ + cast_to_largest_integral_type(parameter)) + +// Assert that the given expression is true. +#define assert_true(c) _assert_true(cast_to_largest_integral_type(c), #c, \ + __FILE__, __LINE__) +// Assert that the given expression is false. +#define assert_false(c) _assert_true(!(cast_to_largest_integral_type(c)), #c, \ + __FILE__, __LINE__) + +// Assert that the two given integers are equal, otherwise fail. +#define assert_int_equal(a, b) \ + _assert_int_equal(cast_to_largest_integral_type(a), \ + cast_to_largest_integral_type(b), \ + __FILE__, __LINE__) +// Assert that the two given integers are not equal, otherwise fail. +#define assert_int_not_equal(a, b) \ + _assert_int_not_equal(cast_to_largest_integral_type(a), \ + cast_to_largest_integral_type(b), \ + __FILE__, __LINE__) + +// Assert that the two given strings are equal, otherwise fail. +#define assert_string_equal(a, b) \ + _assert_string_equal((const char*)(a), (const char*)(b), __FILE__, \ + __LINE__) +// Assert that the two given strings are not equal, otherwise fail. +#define assert_string_not_equal(a, b) \ + _assert_string_not_equal((const char*)(a), (const char*)(b), __FILE__, \ + __LINE__) + +// Assert that the two given areas of memory are equal, otherwise fail. +#define assert_memory_equal(a, b, size) \ + _assert_memory_equal((const char*)(a), (const char*)(b), size, __FILE__, \ + __LINE__) +// Assert that the two given areas of memory are not equal, otherwise fail. 
+#define assert_memory_not_equal(a, b, size) \ + _assert_memory_not_equal((const char*)(a), (const char*)(b), size, \ + __FILE__, __LINE__) + +// Assert that the specified value is >= minimum and <= maximum. +#define assert_in_range(value, minimum, maximum) \ + _assert_in_range( \ + cast_to_largest_integral_type(value), \ + cast_to_largest_integral_type(minimum), \ + cast_to_largest_integral_type(maximum), __FILE__, __LINE__) + +// Assert that the specified value is < minumum or > maximum +#define assert_not_in_range(value, minimum, maximum) \ + _assert_not_in_range( \ + cast_to_largest_integral_type(value), \ + cast_to_largest_integral_type(minimum), \ + cast_to_largest_integral_type(maximum), __FILE__, __LINE__) + +// Assert that the specified value is within a set. +#define assert_in_set(value, values, number_of_values) \ + _assert_in_set(value, values, number_of_values, __FILE__, __LINE__) +// Assert that the specified value is not within a set. +#define assert_not_in_set(value, values, number_of_values) \ + _assert_not_in_set(value, values, number_of_values, __FILE__, __LINE__) + + +// Forces the test to fail immediately and quit. +#define fail() _fail(__FILE__, __LINE__) + +// Generic method to kick off testing +#define run_test(f) _run_test(#f, f, NULL, UNIT_TEST_FUNCTION_TYPE_TEST, NULL) + +// Initializes a UnitTest structure. +#define unit_test(f) { #f, f, UNIT_TEST_FUNCTION_TYPE_TEST } +#define unit_test_setup(test, setup) \ + { #test "_" #setup, setup, UNIT_TEST_FUNCTION_TYPE_SETUP } +#define unit_test_teardown(test, teardown) \ + { #test "_" #teardown, teardown, UNIT_TEST_FUNCTION_TYPE_TEARDOWN } + +/* Initialize an array of UnitTest structures with a setup function for a test + * and a teardown function. Either setup or teardown can be NULL. + */ +#define unit_test_setup_teardown(test, setup, teardown) \ + unit_test_setup(test, setup), \ + unit_test(test), \ + unit_test_teardown(test, teardown) + +/* + * Run tests specified by an array of UnitTest structures. The following + * example illustrates this macro's use with the unit_test macro. + * + * void Test0(); + * void Test1(); + * + * int main(int argc, char* argv[]) { + * const UnitTest tests[] = { + * unit_test(Test0); + * unit_test(Test1); + * }; + * return run_tests(tests); + * } + */ +#define run_tests(tests) _run_tests(tests, sizeof(tests) / sizeof(tests)[0]) + +// Dynamic allocators +#define test_malloc(size) _test_malloc(size, __FILE__, __LINE__) +#define test_calloc(num, size) _test_calloc(num, size, __FILE__, __LINE__) +#define test_free(ptr) _test_free(ptr, __FILE__, __LINE__) + +// Redirect malloc, calloc and free to the unit test allocators. +#if UNIT_TESTING +#define malloc test_malloc +#define calloc test_calloc +#define free test_free +#endif // UNIT_TESTING + +/* + * Ensure mock_assert() is called. If mock_assert() is called the assert + * expression string is returned. 
+ * For example: + * + * #define assert mock_assert + * + * void showmessage(const char *message) { + * assert(message); + * } + * + * int main(int argc, const char* argv[]) { + * expect_assert_failure(show_message(NULL)); + * printf("succeeded\n"); + * return 0; + * } + */ +#define expect_assert_failure(function_call) \ + { \ + const int expression = setjmp(global_expect_assert_env); \ + global_expecting_assert = 1; \ + if (expression) { \ + print_message("Expected assertion %s occurred\n", \ + *((const char**)&expression)); \ + global_expecting_assert = 0; \ + } else { \ + function_call ; \ + global_expecting_assert = 0; \ + print_error("Expected assert in %s\n", #function_call); \ + _fail(__FILE__, __LINE__); \ + } \ + } + +// Function prototype for setup, test and teardown functions. +typedef void (*UnitTestFunction)(void **state); + +// Function that determines whether a function parameter value is correct. +typedef int (*CheckParameterValue)(const LargestIntegralType value, + const LargestIntegralType check_value_data); + +// Type of the unit test function. +typedef enum UnitTestFunctionType { + UNIT_TEST_FUNCTION_TYPE_TEST = 0, + UNIT_TEST_FUNCTION_TYPE_SETUP, + UNIT_TEST_FUNCTION_TYPE_TEARDOWN, +} UnitTestFunctionType; + +/* Stores a unit test function with its name and type. + * NOTE: Every setup function must be paired with a teardown function. It's + * possible to specify NULL function pointers. + */ +typedef struct UnitTest { + const char* name; + UnitTestFunction function; + UnitTestFunctionType function_type; +} UnitTest; + + +// Location within some source code. +typedef struct SourceLocation { + const char* file; + int line; +} SourceLocation; + +// Event that's called to check a parameter value. +typedef struct CheckParameterEvent { + SourceLocation location; + const char *parameter_name; + CheckParameterValue check_value; + LargestIntegralType check_value_data; +} CheckParameterEvent; + +// Used by expect_assert_failure() and mock_assert(). +extern int global_expecting_assert; +extern jmp_buf global_expect_assert_env; + +// Retrieves a value for the given function, as set by "will_return". 
+LargestIntegralType _mock(const char * const function, const char* const file, + const int line); + +void _expect_check( + const char* const function, const char* const parameter, + const char* const file, const int line, + const CheckParameterValue check_function, + const LargestIntegralType check_data, CheckParameterEvent * const event, + const int count); + +void _expect_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType values[], + const size_t number_of_values, const int count); +void _expect_not_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType values[], + const size_t number_of_values, const int count); + +void _expect_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, + const LargestIntegralType maximum, const int count); +void _expect_not_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, + const LargestIntegralType maximum, const int count); + +void _expect_value( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType value, + const int count); +void _expect_not_value( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType value, + const int count); + +void _expect_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count); +void _expect_not_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count); + +void _expect_memory( + const char* const function, const char* const parameter, + const char* const file, const int line, const void* const memory, + const size_t size, const int count); +void _expect_not_memory( + const char* const function, const char* const parameter, + const char* const file, const int line, const void* const memory, + const size_t size, const int count); + +void _expect_any( + const char* const function, const char* const parameter, + const char* const file, const int line, const int count); + +void _check_expected( + const char * const function_name, const char * const parameter_name, + const char* file, const int line, const LargestIntegralType value); + +// Can be used to replace assert in tested code so that in conjuction with +// check_assert() it's possible to determine whether an assert condition has +// failed without stopping a test. 
+void mock_assert(const int result, const char* const expression, + const char * const file, const int line); + +void _will_return(const char * const function_name, const char * const file, + const int line, const LargestIntegralType value, + const int count); +void _assert_true(const LargestIntegralType result, + const char* const expression, + const char * const file, const int line); +void _assert_int_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line); +void _assert_int_not_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line); +void _assert_string_equal(const char * const a, const char * const b, + const char * const file, const int line); +void _assert_string_not_equal(const char * const a, const char * const b, + const char *file, const int line); +void _assert_memory_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line); +void _assert_memory_not_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line); +void _assert_in_range( + const LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const file, const int line); +void _assert_not_in_range( + const LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const file, const int line); +void _assert_in_set( + const LargestIntegralType value, const LargestIntegralType values[], + const size_t number_of_values, const char* const file, const int line); +void _assert_not_in_set( + const LargestIntegralType value, const LargestIntegralType values[], + const size_t number_of_values, const char* const file, const int line); + +void* _test_malloc(const size_t size, const char* file, const int line); +void* _test_calloc(const size_t number_of_elements, const size_t size, + const char* file, const int line); +void _test_free(void* const ptr, const char* file, const int line); + +void _fail(const char * const file, const int line); +int _run_test( + const char * const function_name, const UnitTestFunction Function, + void ** const state, const UnitTestFunctionType function_type, + const void* const heap_check_point); +int _run_tests(const UnitTest * const tests, const size_t number_of_tests); + +// Standard output and error print methods. 
+void print_message(const char* const format, ...); +void print_error(const char* const format, ...); +void vprint_message(const char* const format, va_list args); +void vprint_error(const char* const format, va_list args); + +#endif // CMOCKERY_H_ diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 98d8d4d5..589554f9 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -1,9 +1,9 @@ #include #include #include -#include #include "rangeset.h" +#include "cmockery.h" /* for "print" functions */ #include "debug_print.c" @@ -30,18 +30,18 @@ int main(void) { /* Array of test functions */ - const struct CMUnitTest tests[] = + const struct UnitTest tests[] = { - cmocka_unit_test(test_irange_basic), - cmocka_unit_test(test_irange_list_union_merge), - cmocka_unit_test(test_irange_list_union_lossy_cov), - cmocka_unit_test(test_irange_list_union_complete_cov), - cmocka_unit_test(test_irange_list_union_intersecting), - cmocka_unit_test(test_irange_list_intersection), + unit_test(test_irange_basic), + unit_test(test_irange_list_union_merge), + unit_test(test_irange_list_union_lossy_cov), + unit_test(test_irange_list_union_complete_cov), + unit_test(test_irange_list_union_intersecting), + unit_test(test_irange_list_intersection), }; /* Run series of tests */ - return cmocka_run_group_tests(tests, NULL, NULL); + return run_tests(tests); } /* diff --git a/travis/dep-ubuntu-llvm.sh b/travis/dep-ubuntu-llvm.sh deleted file mode 100755 index e640d5b5..00000000 --- a/travis/dep-ubuntu-llvm.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cat ./travis/llvm-snapshot.gpg.key | sudo apt-key add - -echo "deb http://apt.llvm.org/trusty/ llvm-toolchain-$(lsb_release -cs)-$LLVM_VER main" | sudo tee /etc/apt/sources.list.d/llvm.list diff --git a/travis/dep-ubuntu-postgres.sh b/travis/dep-ubuntu-postgres.sh deleted file mode 100755 index 41c7d346..00000000 --- a/travis/dep-ubuntu-postgres.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cat ./travis/postgresql.gpg.key | sudo apt-key add - -echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PG_VER" | sudo tee /etc/apt/sources.list.d/pgdg.list diff --git a/travis/llvm-snapshot.gpg.key b/travis/llvm-snapshot.gpg.key deleted file mode 100644 index aa6b105a..00000000 --- a/travis/llvm-snapshot.gpg.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.12 (GNU/Linux) - -mQINBFE9lCwBEADi0WUAApM/mgHJRU8lVkkw0CHsZNpqaQDNaHefD6Rw3S4LxNmM -EZaOTkhP200XZM8lVdbfUW9xSjA3oPldc1HG26NjbqqCmWpdo2fb+r7VmU2dq3NM -R18ZlKixiLDE6OUfaXWKamZsXb6ITTYmgTO6orQWYrnW6ckYHSeaAkW0wkDAryl2 -B5v8aoFnQ1rFiVEMo4NGzw4UX+MelF7rxaaregmKVTPiqCOSPJ1McC1dHFN533FY -Wh/RVLKWo6npu+owtwYFQW+zyQhKzSIMvNujFRzhIxzxR9Gn87MoLAyfgKEzrbbT -DhqqNXTxS4UMUKCQaO93TzetX/EBrRpJj+vP640yio80h4Dr5pAd7+LnKwgpTDk1 -G88bBXJAcPZnTSKu9I2c6KY4iRNbvRz4i+ZdwwZtdW4nSdl2792L7Sl7Nc44uLL/ -ZqkKDXEBF6lsX5XpABwyK89S/SbHOytXv9o4puv+65Ac5/UShspQTMSKGZgvDauU -cs8kE1U9dPOqVNCYq9Nfwinkf6RxV1k1+gwtclxQuY7UpKXP0hNAXjAiA5KS5Crq -7aaJg9q2F4bub0mNU6n7UI6vXguF2n4SEtzPRk6RP+4TiT3bZUsmr+1ktogyOJCc -Ha8G5VdL+NBIYQthOcieYCBnTeIH7D3Sp6FYQTYtVbKFzmMK+36ERreL/wARAQAB -tD1TeWx2ZXN0cmUgTGVkcnUgLSBEZWJpYW4gTExWTSBwYWNrYWdlcyA8c3lsdmVz -dHJlQGRlYmlhbi5vcmc+iQI4BBMBAgAiBQJRPZQsAhsDBgsJCAcDAgYVCAIJCgsE -FgIDAQIeAQIXgAAKCRAVz00Yr090Ibx+EADArS/hvkDF8juWMXxh17CgR0WZlHCC -9CTBWkg5a0bNN/3bb97cPQt/vIKWjQtkQpav6/5JTVCSx2riL4FHYhH0iuo4iAPR -udC7Cvg8g7bSPrKO6tenQZNvQm+tUmBHgFiMBJi92AjZ/Qn1Shg7p9ITivFxpLyX 
-wpmnF1OKyI2Kof2rm4BFwfSWuf8Fvh7kDMRLHv+MlnK/7j/BNpKdozXxLcwoFBmn -l0WjpAH3OFF7Pvm1LJdf1DjWKH0Dc3sc6zxtmBR/KHHg6kK4BGQNnFKujcP7TVdv -gMYv84kun14pnwjZcqOtN3UJtcx22880DOQzinoMs3Q4w4o05oIF+sSgHViFpc3W -R0v+RllnH05vKZo+LDzc83DQVrdwliV12eHxrMQ8UYg88zCbF/cHHnlzZWAJgftg -hB08v1BKPgYRUzwJ6VdVqXYcZWEaUJmQAPuAALyZESw94hSo28FAn0/gzEc5uOYx -K+xG/lFwgAGYNb3uGM5m0P6LVTfdg6vDwwOeTNIExVk3KVFXeSQef2ZMkhwA7wya -KJptkb62wBHFE+o9TUdtMCY6qONxMMdwioRE5BYNwAsS1PnRD2+jtlI0DzvKHt7B -MWd8hnoUKhMeZ9TNmo+8CpsAtXZcBho0zPGz/R8NlJhAWpdAZ1CmcPo83EW86Yq7 -BxQUKnNHcwj2ebkCDQRRPZQsARAA4jxYmbTHwmMjqSizlMJYNuGOpIidEdx9zQ5g -zOr431/VfWq4S+VhMDhs15j9lyml0y4ok215VRFwrAREDg6UPMr7ajLmBQGau0Fc -bvZJ90l4NjXp5p0NEE/qOb9UEHT7EGkEhaZ1ekkWFTWCgsy7rRXfZLxB6sk7pzLC -DshyW3zjIakWAnpQ5j5obiDy708pReAuGB94NSyb1HoW/xGsGgvvCw4r0w3xPStw -F1PhmScE6NTBIfLliea3pl8vhKPlCh54Hk7I8QGjo1ETlRP4Qll1ZxHJ8u25f/ta -RES2Aw8Hi7j0EVcZ6MT9JWTI83yUcnUlZPZS2HyeWcUj+8nUC8W4N8An+aNps9l/ -21inIl2TbGo3Yn1JQLnA1YCoGwC34g8QZTJhElEQBN0X29ayWW6OdFx8MDvllbBV -ymmKq2lK1U55mQTfDli7S3vfGz9Gp/oQwZ8bQpOeUkc5hbZszYwP4RX+68xDPfn+ -M9udl+qW9wu+LyePbW6HX90LmkhNkkY2ZzUPRPDHZANU5btaPXc2H7edX4y4maQa -xenqD0lGh9LGz/mps4HEZtCI5CY8o0uCMF3lT0XfXhuLksr7Pxv57yue8LLTItOJ -d9Hmzp9G97SRYYeqU+8lyNXtU2PdrLLq7QHkzrsloG78lCpQcalHGACJzrlUWVP/ -fN3Ht3kAEQEAAYkCHwQYAQIACQUCUT2ULAIbDAAKCRAVz00Yr090IbhWEADbr50X -OEXMIMGRLe+YMjeMX9NG4jxs0jZaWHc/WrGR+CCSUb9r6aPXeLo+45949uEfdSsB -pbaEdNWxF5Vr1CSjuO5siIlgDjmT655voXo67xVpEN4HhMrxugDJfCa6z97P0+ML -PdDxim57uNqkam9XIq9hKQaurxMAECDPmlEXI4QT3eu5qw5/knMzDMZj4Vi6hovL -wvvAeLHO/jsyfIdNmhBGU2RWCEZ9uo/MeerPHtRPfg74g+9PPfP6nyHD2Wes6yGd -oVQwtPNAQD6Cj7EaA2xdZYLJ7/jW6yiPu98FFWP74FN2dlyEA2uVziLsfBrgpS4l -tVOlrO2YzkkqUGrybzbLpj6eeHx+Cd7wcjI8CalsqtL6cG8cUEjtWQUHyTbQWAgG -5VPEgIAVhJ6RTZ26i/G+4J8neKyRs4vz+57UGwY6zI4AB1ZcWGEE3Bf+CDEDgmnP -LSwbnHefK9IljT9XU98PelSryUO/5UPw7leE0akXKB4DtekToO226px1VnGp3Bov -1GBGvpHvL2WizEwdk+nfk8LtrLzej+9FtIcq3uIrYnsac47Pf7p0otcFeTJTjSq3 -krCaoG4Hx0zGQG2ZFpHrSrZTVy6lxvIdfi0beMgY6h78p6M9eYZHQHc02DjFkQXN -bXb5c6gCHESH5PXwPU4jQEE7Ib9J6sbk7ZT2Mw== -=j+4q ------END PGP PUBLIC KEY BLOCK----- diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh deleted file mode 100755 index 890897a4..00000000 --- a/travis/pg-travis-test.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -set -eux - -sudo apt-get update - - -# required packages -apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev build-essential" -pip_packages="testgres" - -# exit code -status=0 - -# pg_config path -pg_ctl_path=/usr/lib/postgresql/$PG_VER/bin/pg_ctl -initdb_path=/usr/lib/postgresql/$PG_VER/bin/initdb -config_path=/usr/lib/postgresql/$PG_VER/bin/pg_config - - -# bug: http://www.postgresql.org/message-id/20130508192711.GA9243@msgid.df7cb.de -sudo update-alternatives --remove-all postmaster.1.gz - -# stop all existing instances (because of https://github.com/travis-ci/travis-cookbooks/pull/221) -sudo service postgresql stop -# ... and make sure they don't come back -echo 'exit 0' | sudo tee /etc/init.d/postgresql -sudo chmod a+x /etc/init.d/postgresql - -# install required packages -sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install -qq $apt_packages - - -# perform code analysis if necessary -if [ $CHECK_CODE = "true" ]; then - - if [ "$CC" = "clang" ]; then - sudo apt-get -y install -qq clang-$LLVM_VER - - scan-build-$LLVM_VER --status-bugs make USE_PGXS=1 PG_CONFIG=$config_path || status=$? 
- exit $status - - elif [ "$CC" = "gcc" ]; then - sudo apt-get -y install -qq cppcheck - - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make clean USE_PGXS=1 PG_CONFIG=$config_path -fi - - -# create cluster 'test' -CLUSTER_PATH=$(pwd)/test_cluster -$initdb_path -D $CLUSTER_PATH -U $USER -A trust - -# build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 CC=${CC} PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" -sudo make install USE_PGXS=1 PG_CONFIG=$config_path - -# check build -status=$? -if [ $status -ne 0 ]; then exit $status; fi - -# set permission to write postgres locks -sudo chown $USER /var/run/postgresql/ - -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -echo "shared_preload_libraries = 'pg_pathman'" >> $CLUSTER_PATH/postgresql.conf -echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf -$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log -w - -# run regression tests -PGPORT=55435 PGUSER=$USER PG_CONFIG=$config_path make installcheck USE_PGXS=1 || status=$? - -# show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - - -set +u - -# create virtual environment and activate it -virtualenv /tmp/envs/pg_pathman -source /tmp/envs/pg_pathman/bin/activate - -# install pip packages -pip3 install $pip_packages - -# run python tests -make USE_PGXS=1 PG_CONFIG=$config_path python_tests || status=$? - -# deactivate virtual environment -deactivate - -set -u - - -# install cmake for cmocka -sudo apt-get -y install -qq cmake - -# build & install cmocka -CMOCKA_VER=1.1.1 -cd tests/cmocka -tar xf cmocka-$CMOCKA_VER.tar.xz -cd cmocka-$CMOCKA_VER -mkdir build && cd build -cmake .. -make && sudo make install -cd ../../../.. - -# export path to libcmocka.so -LD_LIBRARY_PATH=/usr/local/lib -export LD_LIBRARY_PATH - -# run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path PG_CPPFLAGS="-coverage" cmocka_tests || status=$? 
- -# remove useless gcov files -rm -f tests/cmocka/*.gcno -rm -f tests/cmocka/*.gcda - -#generate *.gcov files -gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h - - -exit $status diff --git a/travis/postgresql.gpg.key b/travis/postgresql.gpg.key deleted file mode 100644 index 8480576e..00000000 --- a/travis/postgresql.gpg.key +++ /dev/null @@ -1,77 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja -UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V -G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 -bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi -c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC -IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh -hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U -A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 -RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj -Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 -AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB -tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD -BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A -CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO -xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY -kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 -z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ -Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf -Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy -2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 -B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T -7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi -vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b -ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOI -RgQQEQgABgUCTpdI7gAKCRDFr3dKWFELWqaPAKD1TtT5c3sZz92Fj97KYmqbNQZP -+ACfSC6+hfvlj4GxmUjp1aepoVTo3weJAhwEEAEIAAYFAk6XSQsACgkQTFprqxLS -p64F8Q//cCcutwrH50UoRFejg0EIZav6LUKejC6kpLeubbEtuaIH3r2zMblPGc4i -+eMQKo/PqyQrceRXeNNlqO6/exHozYi2meudxa6IudhwJIOn1MQykJbNMSC2sGUp -1W5M1N5EYgt4hy+qhlfnD66LR4G+9t5FscTJSy84SdiOuqgCOpQmPkVRm1HX5X1+ -dmnzMOCk5LHHQuiacV0qeGO7JcBCVEIDr+uhU1H2u5GPFNHm5u15n25tOxVivb94 -xg6NDjouECBH7cCVuW79YcExH/0X3/9G45rjdHlKPH1OIUJiiX47OTxdG3dAbB4Q -fnViRJhjehFscFvYWSqXo3pgWqUsEvv9qJac2ZEMSz9x2mj0ekWxuM6/hGWxJdB+ -+985rIelPmc7VRAXOjIxWknrXnPCZAMlPlDLu6+vZ5BhFX0Be3y38f7GNCxFkJzl -hWZ4Cj3WojMj+0DaC1eKTj3rJ7OJlt9S9xnO7OOPEUTGyzgNIDAyCiu8F4huLPaT -ape6RupxOMHZeoCVlqx3ouWctelB2oNXcxxiQ/8y+21aHfD4n/CiIFwDvIQjl7dg -mT3u5Lr6yxuosR3QJx1P6rP5ZrDTP9khT30t+HZCbvs5Pq+v/9m6XDmi+NlU7Zuh -Ehy97tL3uBDgoL4b/5BpFL5U9nruPlQzGq1P9jj40dxAaDAX/WKJAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8ACgkQf8x9RqzM -TPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv4E/M+HPIJ4wd -nBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9OU351gm3YQct -AMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJyX3vkWdJSMwC/ -LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/WB4AIj3VohIG -kWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT26pzTiuApWM3k -/9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAypEhaLmXNkg4zD -kH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCSlmgyWsR40EPP -YvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lfH65P64dukxeR -GteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMrR910qvwYfd/4 
-6rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs+bfiQpJG1p7e -B8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY++JAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEACgkQf8x9RqzM -TPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/ArBECjFTBwi/j9 -NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoSxiVr6GQ3YXMb -OGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXObiiZT38l55pp/ -BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtHvwKcA02wwjLe -LXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+wpu6YwVCicxB -Y59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMAKOLhNFUrSQ2m -+3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDeariFF9yC+5bL -tnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5hUWNr+y0i01L -jGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qbiNqCChveIm8m -Yr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7dR8tSyUJ9poDw -gw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0ACgkQf8x9RqzM -TPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWayUIG4Sv6pH6h -m8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0VlkIfg7GUw3Tz -voGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExPZyliUnHdipei -4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0UM4Btgu1Sf3nn -JcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K2+EYJuIBsYUN -orOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307SidEbSnvO5ezNe -mE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2Nm13cmkxYjQ4Z -gMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYeN4D88sLYpFh3 -paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbzoRM3dyGP889a -OyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD5wmrrhN94kby -Gtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3Y= -=DA1T ------END PGP PUBLIC KEY BLOCK----- From 17cc5fb94b802266eda56e03f2915891eb7608fa Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:01:47 +0300 Subject: [PATCH 053/528] Fix condition in Dockerfile.tmpl --- Dockerfile.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index b74538fc..2192600e 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,9 +1,9 @@ FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 PGDATA=/pg/data +ENV LANG=C.UTF-8 PGDATA=/pg/data SELCC=${CC} RUN apk --no-cache add python3 gcc make musl-dev ${CC} -RUN if ${CHECK_CODE} -eq "true" && ${CC} -eq "gcc"; then \ +RUN if ${CHECK_CODE} -eq "true" && ${SELCC} -eq "gcc"; then \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ apk --no-cache add cppcheck; \ From 1c93a96f448ff69d9d9aca6a5b6aa7c55bc8f846 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:06:58 +0300 Subject: [PATCH 054/528] Simplify Dockerfile.tmpl --- Dockerfile.tmpl | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 2192600e..a5013263 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,13 +1,10 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data SELCC=${CC} -RUN apk --no-cache add python3 gcc make musl-dev ${CC} -RUN if ${CHECK_CODE} -eq "true" && ${SELCC} -eq "gcc"; then \ - echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ +RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 
'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add cppcheck; \ - fi && \ + apk --no-cache add python3 gcc make musl-dev cppcheck ${CC} && \ pip3 install testgres && \ mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ From b2c151b42355f2743bc76545615c4ef112cff21f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:21:19 +0300 Subject: [PATCH 055/528] Fix docker image --- Dockerfile.tmpl | 2 +- run_tests.sh | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index a5013263..de22cf8f 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,6 +1,6 @@ FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 PGDATA=/pg/data SELCC=${CC} +ENV LANG=C.UTF-8 PGDATA=/pg/data RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ diff --git a/run_tests.sh b/run_tests.sh index b87c00e3..26de354d 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -2,7 +2,9 @@ set -eux -id +echo CC=$CC +echo CHECK_CODE=$CHECK_CODE +echo PG_VERSION=$PG_VERSION # perform code analysis if necessary if [ $CHECK_CODE = "true" ]; then From aac3bc2dbe97492efdc674a8cabf01af4d0ea616 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:27:00 +0300 Subject: [PATCH 056/528] Change variable name --- .travis.yml | 20 ++++++++++---------- Dockerfile.tmpl | 4 ++-- run_tests.sh | 5 +++-- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/.travis.yml b/.travis.yml index b498e674..e0212902 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,22 +10,22 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${COMPILER}/'${COMPILER}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: - docker-compose run tests env: - - PG_VERSION=10 CHECK_CODE=true CC=clang - - PG_VERSION=9.6 CHECK_CODE=true CC=clang - - PG_VERSION=9.5 CHECK_CODE=true CC=clang - - PG_VERSION=10 CHECK_CODE=true CC=gcc - - PG_VERSION=10 CHECK_CODE=false CC=gcc - - PG_VERSION=9.6 CHECK_CODE=true CC=gcc - - PG_VERSION=9.6 CHECK_CODE=false CC=gcc - - PG_VERSION=9.5 CHECK_CODE=true CC=gcc - - PG_VERSION=9.5 CHECK_CODE=false CC=gcc + - PG_VERSION=10 CHECK_CODE=true COMPILER=clang + - PG_VERSION=9.6 CHECK_CODE=true COMPILER=clang + - PG_VERSION=9.5 CHECK_CODE=true COMPILER=clang + - PG_VERSION=10 CHECK_CODE=true COMPILER=gcc + - PG_VERSION=10 CHECK_CODE=false COMPILER=gcc + - PG_VERSION=9.6 CHECK_CODE=true COMPILER=gcc + - PG_VERSION=9.6 CHECK_CODE=false COMPILER=gcc + - PG_VERSION=9.5 CHECK_CODE=true COMPILER=gcc + - PG_VERSION=9.5 CHECK_CODE=false COMPILER=gcc after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index de22cf8f..cf2b1aa6 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -4,7 +4,7 @@ ENV LANG=C.UTF-8 PGDATA=/pg/data RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add python3 gcc make musl-dev cppcheck ${CC} && \ + apk --no-cache add python3 gcc make musl-dev cppcheck ${COMPILER} && \ pip3 
install testgres && \ mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ @@ -16,4 +16,4 @@ ADD . /pg/pg_pathman WORKDIR /pg/pg_pathman RUN chmod -R go+rwX /pg/pg_pathman USER postgres -ENTRYPOINT PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ENTRYPOINT PGDATA=${PGDATA} COMPILER=${COMPILER} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/run_tests.sh b/run_tests.sh index 26de354d..f5216d7b 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -3,17 +3,18 @@ set -eux echo CC=$CC +echo COMPILER=$COMPILER echo CHECK_CODE=$CHECK_CODE echo PG_VERSION=$PG_VERSION # perform code analysis if necessary if [ $CHECK_CODE = "true" ]; then - if [ "$CC" = "clang" ]; then + if [ "$COMPILER" = "clang" ]; then scan-build --status-bugs make USE_PGXS=1 || status=$? exit $status - elif [ "$CC" = "gcc" ]; then + elif [ "$COMPILER" = "gcc" ]; then cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ --enable=warning,portability,performance \ --suppress=redundantAssignment \ From ad285a903e9c2c61c4db8feeb95c7283c8c71b16 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:29:47 +0300 Subject: [PATCH 057/528] Fix variable error --- run_tests.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index f5216d7b..abc2b128 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -2,7 +2,6 @@ set -eux -echo CC=$CC echo COMPILER=$COMPILER echo CHECK_CODE=$CHECK_CODE echo PG_VERSION=$PG_VERSION From 39929932f761585035bc700ce0040d0fa48f0c73 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 14:07:55 +0300 Subject: [PATCH 058/528] Change travis configuration --- .travis.yml | 18 +++++++++--------- Dockerfile.tmpl | 5 +++-- run_tests.sh | 46 +++++++++++++++++++++------------------------- 3 files changed, 33 insertions(+), 36 deletions(-) diff --git a/.travis.yml b/.travis.yml index e0212902..7e99f5dc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,15 +17,15 @@ script: - docker-compose run tests env: - - PG_VERSION=10 CHECK_CODE=true COMPILER=clang - - PG_VERSION=9.6 CHECK_CODE=true COMPILER=clang - - PG_VERSION=9.5 CHECK_CODE=true COMPILER=clang - - PG_VERSION=10 CHECK_CODE=true COMPILER=gcc - - PG_VERSION=10 CHECK_CODE=false COMPILER=gcc - - PG_VERSION=9.6 CHECK_CODE=true COMPILER=gcc - - PG_VERSION=9.6 CHECK_CODE=false COMPILER=gcc - - PG_VERSION=9.5 CHECK_CODE=true COMPILER=gcc - - PG_VERSION=9.5 CHECK_CODE=false COMPILER=gcc + - PG_VERSION=10 CHECK_CODE=clang + - PG_VERSION=9.6 CHECK_CODE=clang + - PG_VERSION=9.5 CHECK_CODE=clang + - PG_VERSION=10 CHECK_CODE=cppcheck + - PG_VERSION=10 CHECK_CODE=false + - PG_VERSION=9.6 CHECK_CODE=cppcheck + - PG_VERSION=9.6 CHECK_CODE=false + - PG_VERSION=9.5 CHECK_CODE=cppcheck + - PG_VERSION=9.5 CHECK_CODE=false after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index cf2b1aa6..beda726b 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -4,7 +4,8 @@ ENV LANG=C.UTF-8 PGDATA=/pg/data RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add python3 gcc make musl-dev cppcheck ${COMPILER} && \ + apk --no-cache add python3 gcc make musl-dev cppcheck && \ + apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main/ && \ pip3 install testgres && \ mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ @@ 
-16,4 +17,4 @@ ADD . /pg/pg_pathman WORKDIR /pg/pg_pathman RUN chmod -R go+rwX /pg/pg_pathman USER postgres -ENTRYPOINT PGDATA=${PGDATA} COMPILER=${COMPILER} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/run_tests.sh b/run_tests.sh index abc2b128..ebda8f79 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -2,37 +2,33 @@ set -eux -echo COMPILER=$COMPILER echo CHECK_CODE=$CHECK_CODE echo PG_VERSION=$PG_VERSION # perform code analysis if necessary -if [ $CHECK_CODE = "true" ]; then - - if [ "$COMPILER" = "clang" ]; then - scan-build --status-bugs make USE_PGXS=1 || status=$? - exit $status - - elif [ "$COMPILER" = "gcc" ]; then - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make USE_PGXS=1 clean +if [ "$CHECK_CODE" = "clang" ]; then + scan-build --status-bugs make USE_PGXS=1 || status=$? + exit $status + +elif [ "$CHECK_CODE" = "cppcheck" ]; then + cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ + --enable=warning,portability,performance \ + --suppress=redundantAssignment \ + --suppress=uselessAssignmentPtrArg \ + --suppress=incorrectStringBooleanError \ + --std=c89 src/*.c src/include/*.h 2> cppcheck.log + + if [ -s cppcheck.log ]; then + cat cppcheck.log + status=1 # error + fi + + exit $status fi +# don't forget to "make clean" +make USE_PGXS=1 clean + # initialize database initdb From 7cf3500b6f60c3f89ac90e80a466cd568a41be28 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 14:24:24 +0300 Subject: [PATCH 059/528] Fix few errors in tests --- run_tests.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index ebda8f79..dc1f4114 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -3,7 +3,8 @@ set -eux echo CHECK_CODE=$CHECK_CODE -echo PG_VERSION=$PG_VERSION + +status=0 # perform code analysis if necessary if [ "$CHECK_CODE" = "clang" ]; then @@ -15,6 +16,7 @@ elif [ "$CHECK_CODE" = "cppcheck" ]; then --enable=warning,portability,performance \ --suppress=redundantAssignment \ --suppress=uselessAssignmentPtrArg \ + --suppress=literalWithCharPtrCompare \ --suppress=incorrectStringBooleanError \ --std=c89 src/*.c src/include/*.h 2> cppcheck.log From 8de8b0e1c0621cbbd5a307725cb46dd6c583406f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 14:29:06 +0300 Subject: [PATCH 060/528] Fix .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7e99f5dc..576d9efb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${COMPILER}/'${COMPILER}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: From c596089cee4b91f0080e0550d025a377ca8940ac Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:12:44 +0300 Subject: [PATCH 061/528] Try to optimize tests --- Dockerfile.tmpl | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git 
a/Dockerfile.tmpl b/Dockerfile.tmpl index beda726b..529324ca 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -2,12 +2,20 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ - echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ +RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ + apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ + fi + +RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ + apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community \ + fi + +RUN if [ "${CHECK_CODE}" = "false" ] ; then \ apk --no-cache add python3 gcc make musl-dev cppcheck && \ - apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main/ && \ pip3 install testgres && \ - mkdir -p /pg/data && \ + fi + +RUN mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ chown postgres:postgres ${PGDATA} && \ chmod a+rwx /usr/local/lib/postgresql && \ From cc1b0229849b995dae24aa114dcbc2d00cafd87d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:29:46 +0300 Subject: [PATCH 062/528] Fix tests --- Dockerfile.tmpl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 529324ca..16337fbb 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -7,12 +7,12 @@ RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ - apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community \ + apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community; \ fi RUN if [ "${CHECK_CODE}" = "false" ] ; then \ - apk --no-cache add python3 gcc make musl-dev cppcheck && \ - pip3 install testgres && \ + apk --no-cache add python3 gcc make musl-dev cppcheck;\ + pip3 install testgres; \ fi RUN mkdir -p /pg/data && \ From 915593e330d897875efc0fcb2cec39f588eeef8c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:42:54 +0300 Subject: [PATCH 063/528] Fix tests --- Dockerfile.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 16337fbb..a748df46 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -3,7 +3,7 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ - apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ + apk --no-cache add clang-analyzer make musl-dev --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ @@ -11,7 +11,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ fi RUN if [ "${CHECK_CODE}" = "false" ] ; then \ - apk --no-cache add python3 gcc make musl-dev cppcheck;\ + apk --no-cache add python3 gcc make musl-dev;\ pip3 install testgres; \ fi From 3cac044e2971dd93474c247aebe2ed77e30ab513 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:45:04 +0300 Subject: [PATCH 064/528] Fix tests --- Dockerfile.tmpl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index a748df46..b5e2f0f2 100644 --- a/Dockerfile.tmpl +++ 
b/Dockerfile.tmpl @@ -3,7 +3,8 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ - apk --no-cache add clang-analyzer make musl-dev --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ + echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ + apk --no-cache add clang-analyzer make musl-dev; \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ From f949215de94e305cada522410a3e3ecaca9bf8cf Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 17:33:13 +0300 Subject: [PATCH 065/528] Fix tests --- Dockerfile.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index b5e2f0f2..80ede1c0 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -4,7 +4,7 @@ ENV LANG=C.UTF-8 PGDATA=/pg/data RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add clang-analyzer make musl-dev; \ + apk --no-cache add clang-analyzer make musl-dev gcc; \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ From 85e2b804fcfd69e47a3f2fbada0e11eb55cf33fe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 10 Jul 2017 16:20:38 +0300 Subject: [PATCH 066/528] use cmocka instead of cmockery --- Dockerfile.tmpl | 3 +- run_tests.sh | 2 +- tests/cmocka/Makefile | 4 +- tests/cmocka/cmockery.c | 1770 --------------------------------- tests/cmocka/cmockery.h | 484 --------- tests/cmocka/rangeset_tests.c | 18 +- 6 files changed, 14 insertions(+), 2267 deletions(-) delete mode 100755 tests/cmocka/cmockery.c delete mode 100755 tests/cmocka/cmockery.h diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 80ede1c0..bd78ba02 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -12,7 +12,8 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ fi RUN if [ "${CHECK_CODE}" = "false" ] ; then \ - apk --no-cache add python3 gcc make musl-dev;\ + echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ + apk --no-cache add python3 gcc make musl-dev cmocka-dev;\ pip3 install testgres; \ fi diff --git a/run_tests.sh b/run_tests.sh index dc1f4114..d41e053e 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -61,7 +61,7 @@ if [ $status -ne 0 ]; then exit $status; fi set -u -# run mock tests (using CFLAGS_SL for gcov) +# run cmocka tests (using CFLAGS_SL for gcov) make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? if [ $status -ne 0 ]; then exit $status; fi diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index 2d4d8bff..e31e6d95 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,11 +8,11 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) +LDFLAGS += -lcmocka TEST_BIN = rangeset_tests OBJ = missing_basic.o missing_list.o missing_stringinfo.o \ - missing_bitmapset.o rangeset_tests.o cmockery.o \ - $(TOP_SRC_DIR)/rangeset.o + missing_bitmapset.o rangeset_tests.o $(TOP_SRC_DIR)/rangeset.o all: build_extension $(TEST_BIN) diff --git a/tests/cmocka/cmockery.c b/tests/cmocka/cmockery.c deleted file mode 100755 index 5bf212dc..00000000 --- a/tests/cmocka/cmockery.c +++ /dev/null @@ -1,1770 +0,0 @@ -/* - * Copyright 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif -#ifdef HAVE_MALLOC_H -#include -#endif -#include -#ifndef _WIN32 -#include -#endif // !_WIN32 -#include -#include -#include -#include -#include -#ifdef _WIN32 -#include -#endif // _WIN32 -#include - -#ifdef _WIN32 -#define vsnprintf _vsnprintf -#endif // _WIN32 - -/* Backwards compatibility with headers shipped with Visual Studio 2005 and - * earlier. */ -#ifdef _WIN32 -WINBASEAPI BOOL WINAPI IsDebuggerPresent(VOID); -#endif // _WIN32 - -// Size of guard bytes around dynamically allocated blocks. -#define MALLOC_GUARD_SIZE 16 -// Pattern used to initialize guard blocks. -#define MALLOC_GUARD_PATTERN 0xEF -// Pattern used to initialize memory allocated with test_malloc(). -#define MALLOC_ALLOC_PATTERN 0xBA -#define MALLOC_FREE_PATTERN 0xCD -// Alignment of allocated blocks. NOTE: This must be base2. -#define MALLOC_ALIGNMENT sizeof(size_t) - -// Printf formatting for source code locations. -#define SOURCE_LOCATION_FORMAT "%s:%d" - -// Calculates the number of elements in an array. -#define ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) - -// Declare and initialize the pointer member of ValuePointer variable name -// with ptr. -#define declare_initialize_value_pointer_pointer(name, ptr) \ - ValuePointer name ; \ - name.value = 0; \ - name.pointer = (void*)(ptr) - -// Declare and initialize the value member of ValuePointer variable name -// with val. -#define declare_initialize_value_pointer_value(name, val) \ - ValuePointer name ; \ - name.value = val - -// Cast a LargestIntegralType to pointer_type via a ValuePointer. -#define cast_largest_integral_type_to_pointer( \ - pointer_type, largest_integral_type) \ - ((pointer_type)((ValuePointer*)&(largest_integral_type))->pointer) - -// Used to cast LargetIntegralType to void* and vice versa. -typedef union ValuePointer { - LargestIntegralType value; - void *pointer; -} ValuePointer; - -// Doubly linked list node. -typedef struct ListNode { - const void *value; - int refcount; - struct ListNode *next; - struct ListNode *prev; -} ListNode; - -// Debug information for malloc(). -typedef struct MallocBlockInfo { - void* block; // Address of the block returned by malloc(). - size_t allocated_size; // Total size of the allocated block. - size_t size; // Request block size. - SourceLocation location; // Where the block was allocated. - ListNode node; // Node within list of all allocated blocks. -} MallocBlockInfo; - -// State of each test. -typedef struct TestState { - const ListNode *check_point; // Check point of the test if there's a - // setup function. - void *state; // State associated with the test. -} TestState; - -// Determines whether two values are the same. -typedef int (*EqualityFunction)(const void *left, const void *right); - -// Value of a symbol and the place it was declared. -typedef struct SymbolValue { - SourceLocation location; - LargestIntegralType value; -} SymbolValue; - -/* Contains a list of values for a symbol. - * NOTE: Each structure referenced by symbol_values_list_head must have a - * SourceLocation as its' first member. 
- */ -typedef struct SymbolMapValue { - const char *symbol_name; - ListNode symbol_values_list_head; -} SymbolMapValue; - -// Used by list_free() to deallocate values referenced by list nodes. -typedef void (*CleanupListValue)(const void *value, void *cleanup_value_data); - -// Structure used to check the range of integer types. -typedef struct CheckIntegerRange { - CheckParameterEvent event; - LargestIntegralType minimum; - LargestIntegralType maximum; -} CheckIntegerRange; - -// Structure used to check whether an integer value is in a set. -typedef struct CheckIntegerSet { - CheckParameterEvent event; - const LargestIntegralType *set; - size_t size_of_set; -} CheckIntegerSet; - -/* Used to check whether a parameter matches the area of memory referenced by - * this structure. */ -typedef struct CheckMemoryData { - CheckParameterEvent event; - const void *memory; - size_t size; -} CheckMemoryData; - -static ListNode* list_initialize(ListNode * const node); -static ListNode* list_add(ListNode * const head, ListNode *new_node); -static ListNode* list_add_value(ListNode * const head, const void *value, - const int count); -static ListNode* list_remove( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data); -static void list_remove_free( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data); -static int list_empty(const ListNode * const head); -static int list_find( - ListNode * const head, const void *value, - const EqualityFunction equal_func, ListNode **output); -static int list_first(ListNode * const head, ListNode **output); -static ListNode* list_free( - ListNode * const head, const CleanupListValue cleanup_value, - void * const cleanup_value_data); - -static void add_symbol_value( - ListNode * const symbol_map_head, const char * const symbol_names[], - const size_t number_of_symbol_names, const void* value, const int count); -static int get_symbol_value( - ListNode * const symbol_map_head, const char * const symbol_names[], - const size_t number_of_symbol_names, void **output); -static void free_value(const void *value, void *cleanup_value_data); -static void free_symbol_map_value( - const void *value, void *cleanup_value_data); -static void remove_always_return_values(ListNode * const map_head, - const size_t number_of_symbol_names); -static int check_for_leftover_values( - const ListNode * const map_head, const char * const error_message, - const size_t number_of_symbol_names); -// This must be called at the beginning of a test to initialize some data -// structures. -static void initialize_testing(const char *test_name); -// This must be called at the end of a test to free() allocated structures. -static void teardown_testing(const char *test_name); -static void fail_if_leftover_values(const char *test_name); - - -// Keeps track of the calling context returned by setenv() so that the fail() -// method can jump out of a test. -static jmp_buf global_run_test_env; -static int global_running_test = 0; - -// Keeps track of the calling context returned by setenv() so that -// mock_assert() can optionally jump back to expect_assert_failure(). -jmp_buf global_expect_assert_env; -const char *global_last_failed_assert = NULL; -int global_expecting_assert = 0; - -// Keeps a map of the values that functions will have to return to provide -// mocked interfaces. -static ListNode global_function_result_map_head; -// Location of the last mock value returned was declared. 
-static SourceLocation global_last_mock_value_location; - -/* Keeps a map of the values that functions expect as parameters to their - * mocked interfaces. */ -static ListNode global_function_parameter_map_head; -// Location of last parameter value checked was declared. -static SourceLocation global_last_parameter_location; - -// List of all currently allocated blocks. -static ListNode global_allocated_blocks; - -#ifndef _WIN32 -// Signals caught by exception_handler(). -static const int exception_signals[] = { - SIGFPE, - SIGILL, - SIGSEGV, - SIGBUS, - SIGSYS, -}; - -// Default signal functions that should be restored after a test is complete. -typedef void (*SignalFunction)(int signal); -static SignalFunction default_signal_functions[ - ARRAY_LENGTH(exception_signals)]; - -#else // _WIN32 - -// The default exception filter. -static LPTOP_LEVEL_EXCEPTION_FILTER previous_exception_filter; - -// Fatal exceptions. -typedef struct ExceptionCodeInfo { - DWORD code; - const char* description; -} ExceptionCodeInfo; - -#define EXCEPTION_CODE_INFO(exception_code) {exception_code, #exception_code} - -static const ExceptionCodeInfo exception_codes[] = { - EXCEPTION_CODE_INFO(EXCEPTION_ACCESS_VIOLATION), - EXCEPTION_CODE_INFO(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), - EXCEPTION_CODE_INFO(EXCEPTION_DATATYPE_MISALIGNMENT), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_DENORMAL_OPERAND), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_DIVIDE_BY_ZERO), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_INEXACT_RESULT), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_INVALID_OPERATION), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_OVERFLOW), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_STACK_CHECK), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_UNDERFLOW), - EXCEPTION_CODE_INFO(EXCEPTION_GUARD_PAGE), - EXCEPTION_CODE_INFO(EXCEPTION_ILLEGAL_INSTRUCTION), - EXCEPTION_CODE_INFO(EXCEPTION_INT_DIVIDE_BY_ZERO), - EXCEPTION_CODE_INFO(EXCEPTION_INT_OVERFLOW), - EXCEPTION_CODE_INFO(EXCEPTION_INVALID_DISPOSITION), - EXCEPTION_CODE_INFO(EXCEPTION_INVALID_HANDLE), - EXCEPTION_CODE_INFO(EXCEPTION_IN_PAGE_ERROR), - EXCEPTION_CODE_INFO(EXCEPTION_NONCONTINUABLE_EXCEPTION), - EXCEPTION_CODE_INFO(EXCEPTION_PRIV_INSTRUCTION), - EXCEPTION_CODE_INFO(EXCEPTION_STACK_OVERFLOW), -}; -#endif // !_WIN32 - - -// Exit the currently executing test. -static void exit_test(const int quit_application) { - if (global_running_test) { - longjmp(global_run_test_env, 1); - } else if (quit_application) { - exit(-1); - } -} - - -// Initialize a SourceLocation structure. -static void initialize_source_location(SourceLocation * const location) { - assert_true(location); - location->file = NULL; - location->line = 0; -} - - -// Determine whether a source location is currently set. -static int source_location_is_set(const SourceLocation * const location) { - assert_true(location); - return location->file && location->line; -} - - -// Set a source location. -static void set_source_location( - SourceLocation * const location, const char * const file, - const int line) { - assert_true(location); - location->file = file; - location->line = line; -} - - -// Create function results and expected parameter lists. 
-void initialize_testing(const char *test_name) { - list_initialize(&global_function_result_map_head); - initialize_source_location(&global_last_mock_value_location); - list_initialize(&global_function_parameter_map_head); - initialize_source_location(&global_last_parameter_location); -} - - -static void fail_if_leftover_values(const char *test_name) { - int error_occurred = 0; - remove_always_return_values(&global_function_result_map_head, 1); - if (check_for_leftover_values( - &global_function_result_map_head, - "%s() has remaining non-returned values.\n", 1)) { - error_occurred = 1; - } - - remove_always_return_values(&global_function_parameter_map_head, 2); - if (check_for_leftover_values( - &global_function_parameter_map_head, - "%s parameter still has values that haven't been checked.\n", 2)) { - error_occurred = 1; - } - if (error_occurred) { - exit_test(1); - } -} - - -void teardown_testing(const char *test_name) { - list_free(&global_function_result_map_head, free_symbol_map_value, - (void*)0); - initialize_source_location(&global_last_mock_value_location); - list_free(&global_function_parameter_map_head, free_symbol_map_value, - (void*)1); - initialize_source_location(&global_last_parameter_location); -} - -// Initialize a list node. -static ListNode* list_initialize(ListNode * const node) { - node->value = NULL; - node->next = node; - node->prev = node; - node->refcount = 1; - return node; -} - - -/* Adds a value at the tail of a given list. - * The node referencing the value is allocated from the heap. */ -static ListNode* list_add_value(ListNode * const head, const void *value, - const int refcount) { - ListNode * const new_node = (ListNode*)malloc(sizeof(ListNode)); - assert_true(head); - assert_true(value); - new_node->value = value; - new_node->refcount = refcount; - return list_add(head, new_node); -} - - -// Add new_node to the end of the list. -static ListNode* list_add(ListNode * const head, ListNode *new_node) { - assert_true(head); - assert_true(new_node); - new_node->next = head; - new_node->prev = head->prev; - head->prev->next = new_node; - head->prev = new_node; - return new_node; -} - - -// Remove a node from a list. -static ListNode* list_remove( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data) { - assert_true(node); - node->prev->next = node->next; - node->next->prev = node->prev; - if (cleanup_value) { - cleanup_value(node->value, cleanup_value_data); - } - return node; -} - - -/* Remove a list node from a list and free the node. */ -static void list_remove_free( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data) { - assert_true(node); - free(list_remove(node, cleanup_value, cleanup_value_data)); -} - - -/* Frees memory kept by a linked list - * The cleanup_value function is called for every "value" field of nodes in the - * list, except for the head. In addition to each list value, - * cleanup_value_data is passed to each call to cleanup_value. The head - * of the list is not deallocated. - */ -static ListNode* list_free( - ListNode * const head, const CleanupListValue cleanup_value, - void * const cleanup_value_data) { - assert_true(head); - while (!list_empty(head)) { - list_remove_free(head->next, cleanup_value, cleanup_value_data); - } - return head; -} - - -// Determine whether a list is empty. 
-static int list_empty(const ListNode * const head) { - assert_true(head); - return head->next == head; -} - - -/* Find a value in the list using the equal_func to compare each node with the - * value. - */ -static int list_find(ListNode * const head, const void *value, - const EqualityFunction equal_func, ListNode **output) { - ListNode *current; - assert_true(head); - for (current = head->next; current != head; current = current->next) { - if (equal_func(current->value, value)) { - *output = current; - return 1; - } - } - return 0; -} - -// Returns the first node of a list -static int list_first(ListNode * const head, ListNode **output) { - ListNode *target_node; - assert_true(head); - if (list_empty(head)) { - return 0; - } - target_node = head->next; - *output = target_node; - return 1; -} - - -// Deallocate a value referenced by a list. -static void free_value(const void *value, void *cleanup_value_data) { - assert_true(value); - free((void*)value); -} - - -// Releases memory associated to a symbol_map_value. -static void free_symbol_map_value(const void *value, - void *cleanup_value_data) { - SymbolMapValue * const map_value = (SymbolMapValue*)value; - assert_true(value); - list_free(&map_value->symbol_values_list_head, - cleanup_value_data ? free_symbol_map_value : free_value, - (void *)((char *) cleanup_value_data - 1)); - free(map_value); -} - - -/* Determine whether a symbol name referenced by a symbol_map_value - * matches the specified function name. */ -static int symbol_names_match(const void *map_value, const void *symbol) { - return !strcmp(((SymbolMapValue*)map_value)->symbol_name, - (const char*)symbol); -} - - -/* Adds a value to the queue of values associated with the given - * hierarchy of symbols. It's assumed value is allocated from the heap. - */ -static void add_symbol_value(ListNode * const symbol_map_head, - const char * const symbol_names[], - const size_t number_of_symbol_names, - const void* value, const int refcount) { - const char* symbol_name; - ListNode *target_node; - SymbolMapValue *target_map_value; - assert_true(symbol_map_head); - assert_true(symbol_names); - assert_true(number_of_symbol_names); - symbol_name = symbol_names[0]; - - if (!list_find(symbol_map_head, symbol_name, symbol_names_match, - &target_node)) { - SymbolMapValue * const new_symbol_map_value = - malloc(sizeof(*new_symbol_map_value)); - new_symbol_map_value->symbol_name = symbol_name; - list_initialize(&new_symbol_map_value->symbol_values_list_head); - target_node = list_add_value(symbol_map_head, new_symbol_map_value, - 1); - } - - target_map_value = (SymbolMapValue*)target_node->value; - if (number_of_symbol_names == 1) { - list_add_value(&target_map_value->symbol_values_list_head, - value, refcount); - } else { - add_symbol_value(&target_map_value->symbol_values_list_head, - &symbol_names[1], number_of_symbol_names - 1, value, - refcount); - } -} - - -/* Gets the next value associated with the given hierarchy of symbols. - * The value is returned as an output parameter with the function returning the - * node's old refcount value if a value is found, 0 otherwise. - * This means that a return value of 1 indicates the node was just removed from - * the list. 
- */ -static int get_symbol_value( - ListNode * const head, const char * const symbol_names[], - const size_t number_of_symbol_names, void **output) { - const char* symbol_name; - ListNode *target_node; - assert_true(head); - assert_true(symbol_names); - assert_true(number_of_symbol_names); - assert_true(output); - symbol_name = symbol_names[0]; - - if (list_find(head, symbol_name, symbol_names_match, &target_node)) { - SymbolMapValue *map_value; - ListNode *child_list; - int return_value = 0; - assert_true(target_node); - assert_true(target_node->value); - - map_value = (SymbolMapValue*)target_node->value; - child_list = &map_value->symbol_values_list_head; - - if (number_of_symbol_names == 1) { - ListNode *value_node = NULL; - return_value = list_first(child_list, &value_node); - assert_true(return_value); - *output = (void*) value_node->value; - return_value = value_node->refcount; - if (--value_node->refcount == 0) { - list_remove_free(value_node, NULL, NULL); - } - } else { - return_value = get_symbol_value( - child_list, &symbol_names[1], number_of_symbol_names - 1, - output); - } - if (list_empty(child_list)) { - list_remove_free(target_node, free_symbol_map_value, (void*)0); - } - return return_value; - } else { - print_error("No entries for symbol %s.\n", symbol_name); - } - return 0; -} - - -/* Traverse down a tree of symbol values and remove the first symbol value - * in each branch that has a refcount < -1 (i.e should always be returned - * and has been returned at least once). - */ -static void remove_always_return_values(ListNode * const map_head, - const size_t number_of_symbol_names) { - ListNode *current; - assert_true(map_head); - assert_true(number_of_symbol_names); - current = map_head->next; - while (current != map_head) { - SymbolMapValue * const value = (SymbolMapValue*)current->value; - ListNode * const next = current->next; - ListNode *child_list; - assert_true(value); - child_list = &value->symbol_values_list_head; - - if (!list_empty(child_list)) { - if (number_of_symbol_names == 1) { - ListNode * const child_node = child_list->next; - // If this item has been returned more than once, free it. - if (child_node->refcount < -1) { - list_remove_free(child_node, free_value, NULL); - } - } else { - remove_always_return_values(child_list, - number_of_symbol_names - 1); - } - } - - if (list_empty(child_list)) { - list_remove_free(current, free_value, NULL); - } - current = next; - } -} - -/* Checks if there are any leftover values set up by the test that were never - * retrieved through execution, and fail the test if that is the case. 
- */ -static int check_for_leftover_values( - const ListNode * const map_head, const char * const error_message, - const size_t number_of_symbol_names) { - const ListNode *current; - int symbols_with_leftover_values = 0; - assert_true(map_head); - assert_true(number_of_symbol_names); - - for (current = map_head->next; current != map_head; - current = current->next) { - const SymbolMapValue * const value = - (SymbolMapValue*)current->value; - const ListNode *child_list; - assert_true(value); - child_list = &value->symbol_values_list_head; - - if (!list_empty(child_list)) { - if (number_of_symbol_names == 1) { - const ListNode *child_node; - print_error(error_message, value->symbol_name); - print_error(" Remaining item(s) declared at...\n"); - - for (child_node = child_list->next; child_node != child_list; - child_node = child_node->next) { - const SourceLocation * const location = child_node->value; - print_error(" " SOURCE_LOCATION_FORMAT "\n", - location->file, location->line); - } - } else { - print_error("%s.", value->symbol_name); - check_for_leftover_values(child_list, error_message, - number_of_symbol_names - 1); - } - symbols_with_leftover_values ++; - } - } - return symbols_with_leftover_values; -} - - -// Get the next return value for the specified mock function. -LargestIntegralType _mock(const char * const function, const char* const file, - const int line) { - void *result; - const int rc = get_symbol_value(&global_function_result_map_head, - &function, 1, &result); - if (rc) { - SymbolValue * const symbol = (SymbolValue*)result; - const LargestIntegralType value = symbol->value; - global_last_mock_value_location = symbol->location; - if (rc == 1) { - free(symbol); - } - return value; - } else { - print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " - "to mock function %s\n", file, line, function); - if (source_location_is_set(&global_last_mock_value_location)) { - print_error("Previously returned mock value was declared at " - SOURCE_LOCATION_FORMAT "\n", - global_last_mock_value_location.file, - global_last_mock_value_location.line); - } else { - print_error("There were no previously returned mock values for " - "this test.\n"); - } - exit_test(1); - } - return 0; -} - - -// Add a return value for the specified mock function name. -void _will_return(const char * const function_name, const char * const file, - const int line, const LargestIntegralType value, - const int count) { - SymbolValue * const return_value = malloc(sizeof(*return_value)); - assert_true(count > 0 || count == -1); - return_value->value = value; - set_source_location(&return_value->location, file, line); - add_symbol_value(&global_function_result_map_head, &function_name, 1, - return_value, count); -} - - -/* Add a custom parameter checking function. If the event parameter is NULL - * the event structure is allocated internally by this function. If event - * parameter is provided it must be allocated on the heap and doesn't need to - * be deallocated by the caller. - */ -void _expect_check( - const char* const function, const char* const parameter, - const char* const file, const int line, - const CheckParameterValue check_function, - const LargestIntegralType check_data, - CheckParameterEvent * const event, const int count) { - CheckParameterEvent * const check = - event ? 
event : malloc(sizeof(*check)); - const char* symbols[] = {function, parameter}; - check->parameter_name = parameter; - check->check_value = check_function; - check->check_value_data = check_data; - set_source_location(&check->location, file, line); - add_symbol_value(&global_function_parameter_map_head, symbols, 2, check, - count); -} - - -/* Returns 1 if the specified values are equal. If the values are not equal - * an error is displayed and 0 is returned. */ -static int values_equal_display_error(const LargestIntegralType left, - const LargestIntegralType right) { - const int equal = left == right; - if (!equal) { - print_error(LargestIntegralTypePrintfFormat " != " - LargestIntegralTypePrintfFormat "\n", left, right); - } - return equal; -} - -/* Returns 1 if the specified values are not equal. If the values are equal - * an error is displayed and 0 is returned. */ -static int values_not_equal_display_error(const LargestIntegralType left, - const LargestIntegralType right) { - const int not_equal = left != right; - if (!not_equal) { - print_error(LargestIntegralTypePrintfFormat " == " - LargestIntegralTypePrintfFormat "\n", left, right); - } - return not_equal; -} - - -/* Determine whether value is contained within check_integer_set. - * If invert is 0 and the value is in the set 1 is returned, otherwise 0 is - * returned and an error is displayed. If invert is 1 and the value is not - * in the set 1 is returned, otherwise 0 is returned and an error is - * displayed. */ -static int value_in_set_display_error( - const LargestIntegralType value, - const CheckIntegerSet * const check_integer_set, const int invert) { - int succeeded = invert; - assert_true(check_integer_set); - { - const LargestIntegralType * const set = check_integer_set->set; - const size_t size_of_set = check_integer_set->size_of_set; - size_t i; - for (i = 0; i < size_of_set; i++) { - if (set[i] == value) { - // If invert = 0 and item is found, succeeded = 1. - // If invert = 1 and item is found, succeeded = 0. - succeeded = !succeeded; - break; - } - } - if (succeeded) { - return 1; - } - print_error("%d is %sin the set (", value, invert ? "" : "not "); - for (i = 0; i < size_of_set; i++) { - print_error("%d, ", set[i]); - } - print_error(")\n"); - } - return 0; -} - - -/* Determine whether a value is within the specified range. If the value is - * within the specified range 1 is returned. If the value isn't within the - * specified range an error is displayed and 0 is returned. */ -static int integer_in_range_display_error( - const LargestIntegralType value, const LargestIntegralType range_min, - const LargestIntegralType range_max) { - if (value >= range_min && value <= range_max) { - return 1; - } - print_error("%d is not within the range %d-%d\n", value, range_min, - range_max); - return 0; -} - - -/* Determine whether a value is within the specified range. If the value - * is not within the range 1 is returned. If the value is within the - * specified range an error is displayed and zero is returned. */ -static int integer_not_in_range_display_error( - const LargestIntegralType value, const LargestIntegralType range_min, - const LargestIntegralType range_max) { - if (value < range_min || value > range_max) { - return 1; - } - print_error("%d is within the range %d-%d\n", value, range_min, - range_max); - return 0; -} - - -/* Determine whether the specified strings are equal. If the strings are equal - * 1 is returned. If they're not equal an error is displayed and 0 is - * returned. 
- */
-static int string_equal_display_error(
-        const char * const left, const char * const right) {
-    if (strcmp(left, right) == 0) {
-        return 1;
-    }
-    print_error("\"%s\" != \"%s\"\n", left, right);
-    return 0;
-}
-
-
-/* Determine whether the specified strings are not equal. If the strings are
- * not equal 1 is returned. If they're equal an error is displayed and 0 is
- * returned. */
-static int string_not_equal_display_error(
-        const char * const left, const char * const right) {
-    if (strcmp(left, right) != 0) {
-        return 1;
-    }
-    print_error("\"%s\" == \"%s\"\n", left, right);
-    return 0;
-}
-
-
-/* Determine whether the specified areas of memory are equal. If they're equal
- * 1 is returned otherwise an error is displayed and 0 is returned. */
-static int memory_equal_display_error(const char* const a, const char* const b,
-                                      const size_t size) {
-    int differences = 0;
-    size_t i;
-    for (i = 0; i < size; i++) {
-        const char l = a[i];
-        const char r = b[i];
-        if (l != r) {
-            print_error("difference at offset %d 0x%02x 0x%02x\n", i, l, r);
-            differences ++;
-        }
-    }
-    if (differences) {
-        print_error("%d bytes of 0x%08x and 0x%08x differ\n", differences,
-                    a, b);
-        return 0;
-    }
-    return 1;
-}
-
-
-/* Determine whether the specified areas of memory are not equal. If they're
- * not equal 1 is returned otherwise an error is displayed and 0 is
- * returned. */
-static int memory_not_equal_display_error(
-        const char* const a, const char* const b, const size_t size) {
-    int same = 0;
-    size_t i;
-    for (i = 0; i < size; i++) {
-        const char l = a[i];
-        const char r = b[i];
-        if (l == r) {
-            same ++;
-        }
-    }
-    if (same == size) {
-        print_error("%d bytes of 0x%08x and 0x%08x the same\n", same,
-                    a, b);
-        return 0;
-    }
-    return 1;
-}
-
-
-// CheckParameterValue callback to check whether a value is within a set.
-static int check_in_set(const LargestIntegralType value,
-                        const LargestIntegralType check_value_data) {
-    return value_in_set_display_error(value,
-        cast_largest_integral_type_to_pointer(CheckIntegerSet*,
-                                              check_value_data), 0);
-}
-
-
-// CheckParameterValue callback to check whether a value isn't within a set.
-static int check_not_in_set(const LargestIntegralType value,
-                            const LargestIntegralType check_value_data) {
-    return value_in_set_display_error(value,
-        cast_largest_integral_type_to_pointer(CheckIntegerSet*,
-                                              check_value_data), 1);
-}
-
-
-/* Create the callback data for check_in_set() or check_not_in_set() and
- * register a check event. */
-static void expect_set(
-        const char* const function, const char* const parameter,
-        const char* const file, const int line,
-        const LargestIntegralType values[], const size_t number_of_values,
-        const CheckParameterValue check_function, const int count) {
-    CheckIntegerSet * const check_integer_set =
-        malloc(sizeof(*check_integer_set) +
-               (sizeof(values[0]) * number_of_values));
-    LargestIntegralType * const set = (LargestIntegralType*)(
-        check_integer_set + 1);
-    declare_initialize_value_pointer_pointer(check_data, check_integer_set);
-    assert_true(values);
-    assert_true(number_of_values);
-    memcpy(set, values, number_of_values * sizeof(values[0]));
-    check_integer_set->set = set;
-    _expect_check(
-        function, parameter, file, line, check_function,
-        check_data.value, &check_integer_set->event, count);
-}
-
-
-// Add an event to check whether a value is in a set (usage sketch below).
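
For reference, tests drive the set checks above through the public expect_in_set()/check_expected() macros rather than calling _expect_in_set() directly. A minimal sketch, assuming cmockery.h from this patch is on the include path (set_mode and test_set_mode are illustrative names, not part of this codebase):

#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include "cmockery.h"

/* Function under test: reports its argument for parameter checking. */
static void set_mode(int mode) {
    check_expected(mode);
}

static void test_set_mode(void **state) {
    static const LargestIntegralType valid_modes[] = {1, 2, 4};
    /* The next 'mode' passed to set_mode() must be a member of the set. */
    expect_in_set(set_mode, mode, valid_modes);
    set_mode(2);    /* passes; set_mode(3) would fail the test */
}
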
-void _expect_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType values[], const size_t number_of_values, - const int count) { - expect_set(function, parameter, file, line, values, number_of_values, - check_in_set, count); -} - - -// Add an event to check whether a value isn't in a set. -void _expect_not_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType values[], const size_t number_of_values, - const int count) { - expect_set(function, parameter, file, line, values, number_of_values, - check_not_in_set, count); -} - - -// CheckParameterValue callback to check whether a value is within a range. -static int check_in_range(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - CheckIntegerRange * const check_integer_range = - cast_largest_integral_type_to_pointer(CheckIntegerRange*, - check_value_data); - assert_true(check_integer_range); - return integer_in_range_display_error(value, check_integer_range->minimum, - check_integer_range->maximum); -} - - -// CheckParameterValue callback to check whether a value is not within a range. -static int check_not_in_range(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - CheckIntegerRange * const check_integer_range = - cast_largest_integral_type_to_pointer(CheckIntegerRange*, - check_value_data); - assert_true(check_integer_range); - return integer_not_in_range_display_error( - value, check_integer_range->minimum, check_integer_range->maximum); -} - - -/* Create the callback data for check_in_range() or check_not_in_range() and - * register a check event. */ -static void expect_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, const LargestIntegralType maximum, - const CheckParameterValue check_function, const int count) { - CheckIntegerRange * const check_integer_range = - malloc(sizeof(*check_integer_range)); - declare_initialize_value_pointer_pointer(check_data, check_integer_range); - check_integer_range->minimum = minimum; - check_integer_range->maximum = maximum; - _expect_check(function, parameter, file, line, check_function, - check_data.value, &check_integer_range->event, count); -} - - -// Add an event to determine whether a parameter is within a range. -void _expect_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, const LargestIntegralType maximum, - const int count) { - expect_range(function, parameter, file, line, minimum, maximum, - check_in_range, count); -} - - -// Add an event to determine whether a parameter is not within a range. -void _expect_not_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, const LargestIntegralType maximum, - const int count) { - expect_range(function, parameter, file, line, minimum, maximum, - check_not_in_range, count); -} - - -/* CheckParameterValue callback to check whether a value is equal to an - * expected value. */ -static int check_value(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return values_equal_display_error(value, check_value_data); -} - - -// Add an event to check a parameter equals an expected value. 
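
The range and value checks registered above follow the same queueing pattern; a short sketch with the same includes as the previous example (set_speed is an illustrative name):

/* Function under test. */
static void set_speed(unsigned int speed) {
    check_expected(speed);
}

static void test_set_speed(void **state) {
    /* Checks are consumed in order: the first call must satisfy
     * 10 <= speed <= 100, the second must pass exactly 0. */
    expect_in_range(set_speed, speed, 10, 100);
    expect_value(set_speed, speed, 0);
    set_speed(55);
    set_speed(0);
}
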
-void _expect_value(
-        const char* const function, const char* const parameter,
-        const char* const file, const int line,
-        const LargestIntegralType value, const int count) {
-    _expect_check(function, parameter, file, line, check_value, value, NULL,
-                  count);
-}
-
-
-/* CheckParameterValue callback to check whether a value is not equal to an
- * expected value. */
-static int check_not_value(const LargestIntegralType value,
-                           const LargestIntegralType check_value_data) {
-    return values_not_equal_display_error(value, check_value_data);
-}
-
-
-// Add an event to check a parameter is not equal to an expected value.
-void _expect_not_value(
-        const char* const function, const char* const parameter,
-        const char* const file, const int line,
-        const LargestIntegralType value, const int count) {
-    _expect_check(function, parameter, file, line, check_not_value, value,
-                  NULL, count);
-}
-
-
-// CheckParameterValue callback to check whether a parameter equals a string.
-static int check_string(const LargestIntegralType value,
-                        const LargestIntegralType check_value_data) {
-    return string_equal_display_error(
-        cast_largest_integral_type_to_pointer(char*, value),
-        cast_largest_integral_type_to_pointer(char*, check_value_data));
-}
-
-
-// Add an event to check whether a parameter is equal to a string.
-void _expect_string(
-        const char* const function, const char* const parameter,
-        const char* const file, const int line, const char* string,
-        const int count) {
-    declare_initialize_value_pointer_pointer(string_pointer, (char*)string);
-    _expect_check(function, parameter, file, line, check_string,
-                  string_pointer.value, NULL, count);
-}
-
-
-/* CheckParameterValue callback to check whether a parameter is not equal to
- * a string. */
-static int check_not_string(const LargestIntegralType value,
-                            const LargestIntegralType check_value_data) {
-    return string_not_equal_display_error(
-        cast_largest_integral_type_to_pointer(char*, value),
-        cast_largest_integral_type_to_pointer(char*, check_value_data));
-}
-
-
-// Add an event to check whether a parameter is not equal to a string.
-void _expect_not_string(
-        const char* const function, const char* const parameter,
-        const char* const file, const int line, const char* string,
-        const int count) {
-    declare_initialize_value_pointer_pointer(string_pointer, (char*)string);
-    _expect_check(function, parameter, file, line, check_not_string,
-                  string_pointer.value, NULL, count);
-}
-
-/* CheckParameterValue callback to check whether a parameter equals an area of
- * memory. */
-static int check_memory(const LargestIntegralType value,
-                        const LargestIntegralType check_value_data) {
-    CheckMemoryData * const check = cast_largest_integral_type_to_pointer(
-        CheckMemoryData*, check_value_data);
-    assert_true(check);
-    return memory_equal_display_error(
-        cast_largest_integral_type_to_pointer(void*, value),
-        check->memory, check->size);
-}
-
-
-/* Create the callback data for check_memory() or check_not_memory() and
- * register a check event.
*/ -static void expect_memory_setup( - const char* const function, const char* const parameter, - const char* const file, const int line, - const void * const memory, const size_t size, - const CheckParameterValue check_function, const int count) { - CheckMemoryData * const check_data = malloc(sizeof(*check_data) + size); - void * const mem = (void*)(check_data + 1); - declare_initialize_value_pointer_pointer(check_data_pointer, check_data); - assert_true(memory); - assert_true(size); - memcpy(mem, memory, size); - check_data->memory = mem; - check_data->size = size; - _expect_check(function, parameter, file, line, check_function, - check_data_pointer.value, &check_data->event, count); -} - - -// Add an event to check whether a parameter matches an area of memory. -void _expect_memory( - const char* const function, const char* const parameter, - const char* const file, const int line, const void* const memory, - const size_t size, const int count) { - expect_memory_setup(function, parameter, file, line, memory, size, - check_memory, count); -} - - -/* CheckParameterValue callback to check whether a parameter is not equal to - * an area of memory. */ -static int check_not_memory(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - CheckMemoryData * const check = cast_largest_integral_type_to_pointer( - CheckMemoryData*, check_value_data); - assert_true(check); - return memory_not_equal_display_error( - cast_largest_integral_type_to_pointer(void*, value), check->memory, - check->size); -} - - -// Add an event to check whether a parameter doesn't match an area of memory. -void _expect_not_memory( - const char* const function, const char* const parameter, - const char* const file, const int line, const void* const memory, - const size_t size, const int count) { - expect_memory_setup(function, parameter, file, line, memory, size, - check_not_memory, count); -} - - -// CheckParameterValue callback that always returns 1. -static int check_any(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return 1; -} - - -// Add an event to allow any value for a parameter. 
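
A sketch of the memory and wildcard checks, under the same assumptions as the earlier examples (send_packet is an illustrative name):

/* Function under test. */
static void send_packet(const void *buf, size_t len) {
    check_expected(buf);
    check_expected(len);
}

static void test_send_packet(void **state) {
    static const char payload[] = {0x01, 0x02, 0x03};
    /* buf must match payload byte-for-byte; len may be any value. */
    expect_memory(send_packet, buf, payload, sizeof(payload));
    expect_any(send_packet, len);
    send_packet(payload, sizeof(payload));
}
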
-void _expect_any( - const char* const function, const char* const parameter, - const char* const file, const int line, const int count) { - _expect_check(function, parameter, file, line, check_any, 0, NULL, - count); -} - - -void _check_expected( - const char * const function_name, const char * const parameter_name, - const char* file, const int line, const LargestIntegralType value) { - void *result; - const char* symbols[] = {function_name, parameter_name}; - const int rc = get_symbol_value(&global_function_parameter_map_head, - symbols, 2, &result); - if (rc) { - CheckParameterEvent * const check = (CheckParameterEvent*)result; - int check_succeeded; - global_last_parameter_location = check->location; - check_succeeded = check->check_value(value, check->check_value_data); - if (rc == 1) { - free(check); - } - if (!check_succeeded) { - print_error("ERROR: Check of parameter %s, function %s failed\n" - "Expected parameter declared at " - SOURCE_LOCATION_FORMAT "\n", - parameter_name, function_name, - global_last_parameter_location.file, - global_last_parameter_location.line); - _fail(file, line); - } - } else { - print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " - "to check parameter %s of function %s\n", file, line, - parameter_name, function_name); - if (source_location_is_set(&global_last_parameter_location)) { - print_error("Previously declared parameter value was declared at " - SOURCE_LOCATION_FORMAT "\n", - global_last_parameter_location.file, - global_last_parameter_location.line); - } else { - print_error("There were no previously declared parameter values " - "for this test.\n"); - } - exit_test(1); - } -} - - - -/* Replacement for assert. */ -void mock_assert(const int result, const char* const expression, - const char* const file, const int line) { - if (!result) { - if (global_expecting_assert) { - global_last_failed_assert = expression; - longjmp(global_expect_assert_env, result); - } else { - print_error("ASSERT: %s\n", expression); - _fail(file, line); - } - } -} - - -void _assert_true(const LargestIntegralType result, - const char * const expression, - const char * const file, const int line) { - if (!result) { - print_error("%s\n", expression); - _fail(file, line); - } -} - -void _assert_int_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line) { - if (!values_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_int_not_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line) { - if (!values_not_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_string_equal(const char * const a, const char * const b, - const char * const file, const int line) { - if (!string_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_string_not_equal(const char * const a, const char * const b, - const char *file, const int line) { - if (!string_not_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_memory_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line) { - if (!memory_equal_display_error((const char*)a, (const char*)b, size)) { - _fail(file, line); - } -} - - -void _assert_memory_not_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line) { - if (!memory_not_equal_display_error((const char*)a, (const char*)b, - size)) { - _fail(file, line); - } -} - - 
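
Putting _check_expected() and _mock() together: a complete mock validates its parameters and then dequeues a return value. A sketch under the same assumptions as the earlier examples (db_insert is an illustrative name):

/* Mocked dependency. */
static int db_insert(const char *key, int value) {
    check_expected(key);
    check_expected(value);
    return (int) mock();        /* dequeues the will_return() value */
}

static void test_db_insert(void **state) {
    expect_string(db_insert, key, "answer");
    expect_value(db_insert, value, 42);
    will_return(db_insert, 0);  /* mocked success code */
    assert_int_equal(db_insert("answer", 42), 0);
}
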
-void _assert_in_range(
-        const LargestIntegralType value, const LargestIntegralType minimum,
-        const LargestIntegralType maximum, const char* const file,
-        const int line) {
-    if (!integer_in_range_display_error(value, minimum, maximum)) {
-        _fail(file, line);
-    }
-}
-
-void _assert_not_in_range(
-        const LargestIntegralType value, const LargestIntegralType minimum,
-        const LargestIntegralType maximum, const char* const file,
-        const int line) {
-    if (!integer_not_in_range_display_error(value, minimum, maximum)) {
-        _fail(file, line);
-    }
-}
-
-void _assert_in_set(const LargestIntegralType value,
-                    const LargestIntegralType values[],
-                    const size_t number_of_values, const char* const file,
-                    const int line) {
-    CheckIntegerSet check_integer_set;
-    check_integer_set.set = values;
-    check_integer_set.size_of_set = number_of_values;
-    if (!value_in_set_display_error(value, &check_integer_set, 0)) {
-        _fail(file, line);
-    }
-}
-
-void _assert_not_in_set(const LargestIntegralType value,
-                        const LargestIntegralType values[],
-                        const size_t number_of_values, const char* const file,
-                        const int line) {
-    CheckIntegerSet check_integer_set;
-    check_integer_set.set = values;
-    check_integer_set.size_of_set = number_of_values;
-    if (!value_in_set_display_error(value, &check_integer_set, 1)) {
-        _fail(file, line);
-    }
-}
-
-
-// Get the list of allocated blocks.
-static ListNode* get_allocated_blocks_list() {
-    // If it isn't initialized, initialize the list of allocated blocks.
-    if (!global_allocated_blocks.value) {
-        list_initialize(&global_allocated_blocks);
-        global_allocated_blocks.value = (void*)1;
-    }
-    return &global_allocated_blocks;
-}
-
-// Use the real malloc in this function.
-#undef malloc
-void* _test_malloc(const size_t size, const char* file, const int line) {
-    char* ptr;
-    MallocBlockInfo *block_info;
-    ListNode * const block_list = get_allocated_blocks_list();
-    const size_t allocate_size = size + (MALLOC_GUARD_SIZE * 2) +
-                                 sizeof(*block_info) + MALLOC_ALIGNMENT;
-    char* const block = (char*)malloc(allocate_size);
-    assert_true(block);
-
-    // Calculate the returned address.
-    ptr = (char*)(((size_t)block + MALLOC_GUARD_SIZE + sizeof(*block_info) +
-                   MALLOC_ALIGNMENT) & ~(MALLOC_ALIGNMENT - 1));
-
-    // Initialize the guard blocks.
-    memset(ptr - MALLOC_GUARD_SIZE, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE);
-    memset(ptr + size, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE);
-    memset(ptr, MALLOC_ALLOC_PATTERN, size);
-
-    block_info = (MallocBlockInfo*)(ptr - (MALLOC_GUARD_SIZE +
-                                           sizeof(*block_info)));
-    set_source_location(&block_info->location, file, line);
-    block_info->allocated_size = allocate_size;
-    block_info->size = size;
-    block_info->block = block;
-    block_info->node.value = block_info;
-    list_add(block_list, &block_info->node);
-    return ptr;
-}
-#define malloc test_malloc
-
-
-void* _test_calloc(const size_t number_of_elements, const size_t size,
-                   const char* file, const int line) {
-    void* const ptr = _test_malloc(number_of_elements * size, file, line);
-    if (ptr) {
-        memset(ptr, 0, number_of_elements * size);
-    }
-    return ptr;
-}
-
-
-// Use the real free in this function.
-#undef free
-void _test_free(void* const ptr, const char* file, const int line) {
-    unsigned int i;
-    char *block = (char*)ptr;
-    MallocBlockInfo *block_info;
-    _assert_true((LargestIntegralType)ptr, "ptr", file, line);
-    block_info = (MallocBlockInfo*)(block - (MALLOC_GUARD_SIZE +
-                                             sizeof(*block_info)));
-    // Check the guard blocks.
-    {
-        char *guards[2] = {block - MALLOC_GUARD_SIZE,
-                           block + block_info->size};
-        for (i = 0; i < ARRAY_LENGTH(guards); i++) {
-            unsigned int j;
-            char * const guard = guards[i];
-            for (j = 0; j < MALLOC_GUARD_SIZE; j++) {
-                const char diff = guard[j] - MALLOC_GUARD_PATTERN;
-                if (diff) {
-                    print_error(
-                        "Guard block of 0x%08x size=%d allocated by "
-                        SOURCE_LOCATION_FORMAT " at 0x%08x is corrupt\n",
-                        (size_t)ptr, block_info->size,
-                        block_info->location.file, block_info->location.line,
-                        (size_t)&guard[j]);
-                    _fail(file, line);
-                }
-            }
-        }
-    }
-    list_remove(&block_info->node, NULL, NULL);
-
-    block = block_info->block;
-    memset(block, MALLOC_FREE_PATTERN, block_info->allocated_size);
-    free(block);
-}
-#define free test_free
-
-
-// Crudely checkpoint the current heap state.
-static const ListNode* check_point_allocated_blocks() {
-    return get_allocated_blocks_list()->prev;
-}
-
-
-/* Display the blocks allocated after the specified check point. This
- * function returns the number of blocks displayed. */
-static int display_allocated_blocks(const ListNode * const check_point) {
-    const ListNode * const head = get_allocated_blocks_list();
-    const ListNode *node;
-    int allocated_blocks = 0;
-    assert_true(check_point);
-    assert_true(check_point->next);
-
-    for (node = check_point->next; node != head; node = node->next) {
-        const MallocBlockInfo * const block_info = node->value;
-        assert_true(block_info);
-
-        if (!allocated_blocks) {
-            print_error("Blocks allocated...\n");
-        }
-        print_error("  0x%08x : " SOURCE_LOCATION_FORMAT "\n",
-                    block_info->block, block_info->location.file,
-                    block_info->location.line);
-        allocated_blocks ++;
-    }
-    return allocated_blocks;
-}
-
-
-// Free all blocks allocated after the specified check point.
-static void free_allocated_blocks(const ListNode * const check_point) {
-    const ListNode * const head = get_allocated_blocks_list();
-    const ListNode *node;
-    assert_true(check_point);
-
-    node = check_point->next;
-    assert_true(node);
-
-    while (node != head) {
-        MallocBlockInfo * const block_info = (MallocBlockInfo*)node->value;
-        node = node->next;
-        free((char*)block_info + sizeof(*block_info) + MALLOC_GUARD_SIZE);
-    }
-}
-
-
-// Fail if any blocks are allocated after the specified check point.
-static void fail_if_blocks_allocated(const ListNode * const check_point,
-                                     const char * const test_name) {
-    const int allocated_blocks = display_allocated_blocks(check_point);
-    if (allocated_blocks) {
-        free_allocated_blocks(check_point);
-        print_error("ERROR: %s leaked %d block(s)\n", test_name,
-                    allocated_blocks);
-        exit_test(1);
-    }
-}
-
-
-void _fail(const char * const file, const int line) {
-    print_error("ERROR: " SOURCE_LOCATION_FORMAT " Failure!\n", file, line);
-    exit_test(1);
-}
-
-
-#ifndef _WIN32
-static void exception_handler(int sig) {
-    print_error("%s\n", strsignal(sig));
-    exit_test(1);
-}
-
-#else // _WIN32
-
-static LONG WINAPI exception_filter(EXCEPTION_POINTERS *exception_pointers) {
-    EXCEPTION_RECORD * const exception_record =
-        exception_pointers->ExceptionRecord;
-    const DWORD code = exception_record->ExceptionCode;
-    unsigned int i;
-    for (i = 0; i < ARRAY_LENGTH(exception_codes); i++) {
-        const ExceptionCodeInfo * const code_info = &exception_codes[i];
-        if (code == code_info->code) {
-            static int shown_debug_message = 0;
-            fflush(stdout);
-            print_error("%s occurred at 0x%08x.\n", code_info->description,
-                        exception_record->ExceptionAddress);
-            if (!shown_debug_message) {
-                print_error(
-                    "\n"
-                    "To debug in Visual Studio...\n"
-                    "1. Select menu item File->Open Project\n"
-                    "2. Change 'Files of type' to 'Executable Files'\n"
-                    "3. Open this executable.\n"
-                    "4. Select menu item Debug->Start\n"
-                    "\n"
-                    "Alternatively, set the environment variable \n"
-                    "UNIT_TESTING_DEBUG to 1 and rebuild this executable, \n"
-                    "then click 'Debug' in the popup dialog box.\n"
-                    "\n");
-                shown_debug_message = 1;
-            }
-            exit_test(0);
-            return EXCEPTION_EXECUTE_HANDLER;
-        }
-    }
-    return EXCEPTION_CONTINUE_SEARCH;
-}
-#endif // !_WIN32
-
-
-// Standard output and error print methods.
-void vprint_message(const char* const format, va_list args) {
-    char buffer[1024];
-    vsnprintf(buffer, sizeof(buffer), format, args);
-    puts(buffer);
-#ifdef _WIN32
-    OutputDebugString(buffer);
-#endif // _WIN32
-}
-
-
-void vprint_error(const char* const format, va_list args) {
-    char buffer[1024];
-    vsnprintf(buffer, sizeof(buffer), format, args);
-    fputs(buffer, stderr);
-#ifdef _WIN32
-    OutputDebugString(buffer);
-#endif // _WIN32
-}
-
-
-void print_message(const char* const format, ...) {
-    va_list args;
-    va_start(args, format);
-    vprint_message(format, args);
-    va_end(args);
-}
-
-
-void print_error(const char* const format, ...) {
-    va_list args;
-    va_start(args, format);
-    vprint_error(format, args);
-    va_end(args);
-}
-
-
-int _run_test(
-        const char * const function_name, const UnitTestFunction Function,
-        void ** const state, const UnitTestFunctionType function_type,
-        const void* const heap_check_point) {
-    const ListNode * const check_point = heap_check_point ?
-        heap_check_point : check_point_allocated_blocks();
-    void *current_state = NULL;
-    int rc = 1;
-    int handle_exceptions = 1;
-#ifdef _WIN32
-    handle_exceptions = !IsDebuggerPresent();
-#endif // _WIN32
-#if UNIT_TESTING_DEBUG
-    handle_exceptions = 0;
-#endif // UNIT_TESTING_DEBUG
-
-    if (handle_exceptions) {
-#ifndef _WIN32
-        unsigned int i;
-        for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) {
-            default_signal_functions[i] = signal(
-                exception_signals[i], exception_handler);
-        }
-#else // _WIN32
-        previous_exception_filter = SetUnhandledExceptionFilter(
-            exception_filter);
-#endif // !_WIN32
-    }
-
-    if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) {
-        print_message("%s: Starting test\n", function_name);
-    }
-    initialize_testing(function_name);
-    global_running_test = 1;
-    if (setjmp(global_run_test_env) == 0) {
-        Function(state ? state : &current_state);
-        fail_if_leftover_values(function_name);
-
-        /* If this is a setup function then ignore any allocated blocks
-         * only ensure they're deallocated on tear down. */
-        if (function_type != UNIT_TEST_FUNCTION_TYPE_SETUP) {
-            fail_if_blocks_allocated(check_point, function_name);
-        }
-
-        global_running_test = 0;
-
-        if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) {
-            print_message("%s: Test completed successfully.\n", function_name);
-        }
-        rc = 0;
-    } else {
-        global_running_test = 0;
-        print_message("%s: Test failed.\n", function_name);
-    }
-    teardown_testing(function_name);
-
-    if (handle_exceptions) {
-#ifndef _WIN32
-        unsigned int i;
-        for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) {
-            signal(exception_signals[i], default_signal_functions[i]);
-        }
-#else // _WIN32
-        if (previous_exception_filter) {
-            SetUnhandledExceptionFilter(previous_exception_filter);
-            previous_exception_filter = NULL;
-        }
-#endif // !_WIN32
-    }
-
-    return rc;
-}
-
-
-int _run_tests(const UnitTest * const tests, const size_t number_of_tests) {
-    // Whether to execute the next test.
-    int run_next_test = 1;
-    // Whether the previous test failed.
-    int previous_test_failed = 0;
-    // Check point of the heap state.
-    const ListNode * const check_point = check_point_allocated_blocks();
-    // Current test being executed.
-    size_t current_test = 0;
-    // Number of tests executed.
-    size_t tests_executed = 0;
-    // Number of failed tests.
-    size_t total_failed = 0;
-    // Number of setup functions.
-    size_t setups = 0;
-    // Number of teardown functions.
-    size_t teardowns = 0;
-    /* A stack of test states. A state is pushed on the stack
-     * when a test setup occurs and popped on tear down. */
-    TestState* test_states = malloc(number_of_tests * sizeof(*test_states));
-    size_t number_of_test_states = 0;
-    // Names of the tests that failed.
-    const char** failed_names = malloc(number_of_tests *
-                                       sizeof(*failed_names));
-    void **current_state = NULL;
-    // Make sure LargestIntegralType is at least the size of a pointer.
-    assert_true(sizeof(LargestIntegralType) >= sizeof(void*));
-
-    while (current_test < number_of_tests) {
-        const ListNode *test_check_point = NULL;
-        TestState *current_TestState;
-        const UnitTest * const test = &tests[current_test++];
-        if (!test->function) {
-            continue;
-        }
-
-        switch (test->function_type) {
-        case UNIT_TEST_FUNCTION_TYPE_TEST:
-            run_next_test = 1;
-            break;
-        case UNIT_TEST_FUNCTION_TYPE_SETUP: {
-            // Checkpoint the heap before the setup.
-            current_TestState = &test_states[number_of_test_states++];
-            current_TestState->check_point = check_point_allocated_blocks();
-            test_check_point = current_TestState->check_point;
-            current_state = &current_TestState->state;
-            *current_state = NULL;
-            run_next_test = 1;
-            setups ++;
-            break;
-        }
-        case UNIT_TEST_FUNCTION_TYPE_TEARDOWN:
-            // Check the heap based on the last setup checkpoint.
-            assert_true(number_of_test_states);
-            current_TestState = &test_states[--number_of_test_states];
-            test_check_point = current_TestState->check_point;
-            current_state = &current_TestState->state;
-            teardowns ++;
-            break;
-        default:
-            print_error("Invalid unit test function type %d\n",
-                        test->function_type);
-            exit_test(1);
-            break;
-        }
-
-        if (run_next_test) {
-            int failed = _run_test(test->name, test->function, current_state,
-                                   test->function_type, test_check_point);
-            if (failed) {
-                failed_names[total_failed] = test->name;
-            }
-
-            switch (test->function_type) {
-            case UNIT_TEST_FUNCTION_TYPE_TEST:
-                previous_test_failed = failed;
-                total_failed += failed;
-                tests_executed ++;
-                break;
-
-            case UNIT_TEST_FUNCTION_TYPE_SETUP:
-                if (failed) {
-                    total_failed ++;
-                    tests_executed ++;
-                    // Skip forward until the next test or setup function.
-                    run_next_test = 0;
-                }
-                previous_test_failed = 0;
-                break;
-
-            case UNIT_TEST_FUNCTION_TYPE_TEARDOWN:
-                // If this test failed.
-                if (failed && !previous_test_failed) {
-                    total_failed ++;
-                }
-                break;
-            default:
-                assert_false("BUG: shouldn't be here!");
-                break;
-            }
-        }
-    }
-
-    if (total_failed) {
-        size_t i;
-        print_error("%d out of %d tests failed!\n", total_failed,
-                    tests_executed);
-        for (i = 0; i < total_failed; i++) {
-            print_error("    %s\n", failed_names[i]);
-        }
-    } else {
-        print_message("All %d tests passed\n", tests_executed);
-    }
-
-    if (number_of_test_states) {
-        print_error("Mismatched number of setup %d and teardown %d "
-                    "functions\n", setups, teardowns);
-        total_failed = -1;
-    }
-
-    free(test_states);
-    free((void*)failed_names);
-
-    fail_if_blocks_allocated(check_point, "run_tests");
-    return (int)total_failed;
-}
diff --git a/tests/cmocka/cmockery.h b/tests/cmocka/cmockery.h
deleted file mode 100755
index 4d5235cd..00000000
--- a/tests/cmocka/cmockery.h
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * Copyright 2008 Google Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef CMOCKERY_H_
-#define CMOCKERY_H_
-/*
- * These headers or their equivalents should be included prior to including
- * this header file.
- *
- * #include <stdarg.h>
- * #include <stddef.h>
- * #include <setjmp.h>
- *
- * This allows test applications to use custom definitions of C standard
- * library functions and types.
- */
-
-// For those who are used to __func__ from gcc.
-#ifndef __func__
-#define __func__ __FUNCTION__
-#endif
-
-/* Largest integral type. This type should be large enough to hold any
- * pointer or integer supported by the compiler. */
-#ifndef LargestIntegralType
-#define LargestIntegralType unsigned long long
-#endif // LargestIntegralType
-
-// Printf format used to display LargestIntegralType.
-#ifndef LargestIntegralTypePrintfFormat
-#ifdef _WIN32
-#define LargestIntegralTypePrintfFormat "%I64x"
-#else
-#define LargestIntegralTypePrintfFormat "%llx"
-#endif // _WIN32
-#endif // LargestIntegralTypePrintfFormat
-
-// Perform an unsigned cast to LargestIntegralType.
-#define cast_to_largest_integral_type(value) \
-    ((LargestIntegralType)(value))
-
-// Retrieves a return value for the current function.
-#define mock() _mock(__func__, __FILE__, __LINE__)
-
-/* Stores a value to be returned by the specified function later.
- * The count parameter returns the number of times the value should be returned
- * by mock(). If count is set to -1 the value will always be returned.
- */
-#define will_return(function, value) \
-    _will_return(#function, __FILE__, __LINE__, \
-                 cast_to_largest_integral_type(value), 1)
-#define will_return_count(function, value, count) \
-    _will_return(#function, __FILE__, __LINE__, \
-                 cast_to_largest_integral_type(value), count)
-
-/* Add a custom parameter checking function. If the event parameter is NULL
- * the event structure is allocated internally by this function. If the event
- * parameter is provided it must be allocated on the heap and doesn't need to
- * be deallocated by the caller.
- */ -#define expect_check(function, parameter, check_function, check_data) \ - _expect_check(#function, #parameter, __FILE__, __LINE__, check_function, \ - cast_to_largest_integral_type(check_data), NULL, 0) - -/* Add an event to check a parameter, using check_expected(), against a set of - * values. See will_return() for a description of the count parameter. - */ -#define expect_in_set(function, parameter, value_array) \ - expect_in_set_count(function, parameter, value_array, 1) -#define expect_in_set_count(function, parameter, value_array, count) \ - _expect_in_set(#function, #parameter, __FILE__, __LINE__, value_array, \ - sizeof(value_array) / sizeof((value_array)[0]), count) -#define expect_not_in_set(function, parameter, value_array) \ - expect_not_in_set_count(function, parameter, value_array, 1) -#define expect_not_in_set_count(function, parameter, value_array, count) \ - _expect_not_in_set( \ - #function, #parameter, __FILE__, __LINE__, value_array, \ - sizeof(value_array) / sizeof((value_array)[0]), count) - - -/* Add an event to check a parameter, using check_expected(), against a - * signed range. Where range is minimum <= value <= maximum. - * See will_return() for a description of the count parameter. - */ -#define expect_in_range(function, parameter, minimum, maximum) \ - expect_in_range_count(function, parameter, minimum, maximum, 1) -#define expect_in_range_count(function, parameter, minimum, maximum, count) \ - _expect_in_range(#function, #parameter, __FILE__, __LINE__, minimum, \ - maximum, count) - -/* Add an event to check a parameter, using check_expected(), against a - * signed range. Where range is value < minimum or value > maximum. - * See will_return() for a description of the count parameter. - */ -#define expect_not_in_range(function, parameter, minimum, maximum) \ - expect_not_in_range_count(function, parameter, minimum, maximum, 1) -#define expect_not_in_range_count(function, parameter, minimum, maximum, \ - count) \ - _expect_not_in_range(#function, #parameter, __FILE__, __LINE__, \ - minimum, maximum, count) - -/* Add an event to check whether a parameter, using check_expected(), is or - * isn't a value. See will_return() for a description of the count parameter. - */ -#define expect_value(function, parameter, value) \ - expect_value_count(function, parameter, value, 1) -#define expect_value_count(function, parameter, value, count) \ - _expect_value(#function, #parameter, __FILE__, __LINE__, \ - cast_to_largest_integral_type(value), count) -#define expect_not_value(function, parameter, value) \ - expect_not_value_count(function, parameter, value, 1) -#define expect_not_value_count(function, parameter, value, count) \ - _expect_not_value(#function, #parameter, __FILE__, __LINE__, \ - cast_to_largest_integral_type(value), count) - -/* Add an event to check whether a parameter, using check_expected(), - * is or isn't a string. See will_return() for a description of the count - * parameter. 
- */ -#define expect_string(function, parameter, string) \ - expect_string_count(function, parameter, string, 1) -#define expect_string_count(function, parameter, string, count) \ - _expect_string(#function, #parameter, __FILE__, __LINE__, \ - (const char*)(string), count) -#define expect_not_string(function, parameter, string) \ - expect_not_string_count(function, parameter, string, 1) -#define expect_not_string_count(function, parameter, string, count) \ - _expect_not_string(#function, #parameter, __FILE__, __LINE__, \ - (const char*)(string), count) - -/* Add an event to check whether a parameter, using check_expected() does or - * doesn't match an area of memory. See will_return() for a description of - * the count parameter. - */ -#define expect_memory(function, parameter, memory, size) \ - expect_memory_count(function, parameter, memory, size, 1) -#define expect_memory_count(function, parameter, memory, size, count) \ - _expect_memory(#function, #parameter, __FILE__, __LINE__, \ - (const void*)(memory), size, count) -#define expect_not_memory(function, parameter, memory, size) \ - expect_not_memory_count(function, parameter, memory, size, 1) -#define expect_not_memory_count(function, parameter, memory, size, count) \ - _expect_not_memory(#function, #parameter, __FILE__, __LINE__, \ - (const void*)(memory), size, count) - - -/* Add an event to allow any value for a parameter checked using - * check_expected(). See will_return() for a description of the count - * parameter. - */ -#define expect_any(function, parameter) \ - expect_any_count(function, parameter, 1) -#define expect_any_count(function, parameter, count) \ - _expect_any(#function, #parameter, __FILE__, __LINE__, count) - -/* Determine whether a function parameter is correct. This ensures the next - * value queued by one of the expect_*() macros matches the specified variable. - */ -#define check_expected(parameter) \ - _check_expected(__func__, #parameter, __FILE__, __LINE__, \ - cast_to_largest_integral_type(parameter)) - -// Assert that the given expression is true. -#define assert_true(c) _assert_true(cast_to_largest_integral_type(c), #c, \ - __FILE__, __LINE__) -// Assert that the given expression is false. -#define assert_false(c) _assert_true(!(cast_to_largest_integral_type(c)), #c, \ - __FILE__, __LINE__) - -// Assert that the two given integers are equal, otherwise fail. -#define assert_int_equal(a, b) \ - _assert_int_equal(cast_to_largest_integral_type(a), \ - cast_to_largest_integral_type(b), \ - __FILE__, __LINE__) -// Assert that the two given integers are not equal, otherwise fail. -#define assert_int_not_equal(a, b) \ - _assert_int_not_equal(cast_to_largest_integral_type(a), \ - cast_to_largest_integral_type(b), \ - __FILE__, __LINE__) - -// Assert that the two given strings are equal, otherwise fail. -#define assert_string_equal(a, b) \ - _assert_string_equal((const char*)(a), (const char*)(b), __FILE__, \ - __LINE__) -// Assert that the two given strings are not equal, otherwise fail. -#define assert_string_not_equal(a, b) \ - _assert_string_not_equal((const char*)(a), (const char*)(b), __FILE__, \ - __LINE__) - -// Assert that the two given areas of memory are equal, otherwise fail. -#define assert_memory_equal(a, b, size) \ - _assert_memory_equal((const char*)(a), (const char*)(b), size, __FILE__, \ - __LINE__) -// Assert that the two given areas of memory are not equal, otherwise fail. 
-#define assert_memory_not_equal(a, b, size) \
-    _assert_memory_not_equal((const char*)(a), (const char*)(b), size, \
-                             __FILE__, __LINE__)
-
-// Assert that the specified value is >= minimum and <= maximum.
-#define assert_in_range(value, minimum, maximum) \
-    _assert_in_range( \
-        cast_to_largest_integral_type(value), \
-        cast_to_largest_integral_type(minimum), \
-        cast_to_largest_integral_type(maximum), __FILE__, __LINE__)
-
-// Assert that the specified value is < minimum or > maximum.
-#define assert_not_in_range(value, minimum, maximum) \
-    _assert_not_in_range( \
-        cast_to_largest_integral_type(value), \
-        cast_to_largest_integral_type(minimum), \
-        cast_to_largest_integral_type(maximum), __FILE__, __LINE__)
-
-// Assert that the specified value is within a set.
-#define assert_in_set(value, values, number_of_values) \
-    _assert_in_set(value, values, number_of_values, __FILE__, __LINE__)
-// Assert that the specified value is not within a set.
-#define assert_not_in_set(value, values, number_of_values) \
-    _assert_not_in_set(value, values, number_of_values, __FILE__, __LINE__)
-
-
-// Forces the test to fail immediately and quit.
-#define fail() _fail(__FILE__, __LINE__)
-
-// Generic method to kick off testing
-#define run_test(f) _run_test(#f, f, NULL, UNIT_TEST_FUNCTION_TYPE_TEST, NULL)
-
-// Initializes a UnitTest structure.
-#define unit_test(f) { #f, f, UNIT_TEST_FUNCTION_TYPE_TEST }
-#define unit_test_setup(test, setup) \
-    { #test "_" #setup, setup, UNIT_TEST_FUNCTION_TYPE_SETUP }
-#define unit_test_teardown(test, teardown) \
-    { #test "_" #teardown, teardown, UNIT_TEST_FUNCTION_TYPE_TEARDOWN }
-
-/* Initialize an array of UnitTest structures with a setup function for a test
- * and a teardown function. Either setup or teardown can be NULL.
- */
-#define unit_test_setup_teardown(test, setup, teardown) \
-    unit_test_setup(test, setup), \
-    unit_test(test), \
-    unit_test_teardown(test, teardown)
-
-/*
- * Run tests specified by an array of UnitTest structures. The following
- * example illustrates this macro's use with the unit_test macro.
- *
- * void Test0();
- * void Test1();
- *
- * int main(int argc, char* argv[]) {
- *     const UnitTest tests[] = {
- *         unit_test(Test0),
- *         unit_test(Test1),
- *     };
- *     return run_tests(tests);
- * }
- */
-#define run_tests(tests) _run_tests(tests, sizeof(tests) / sizeof(tests)[0])
-
-// Dynamic allocators
-#define test_malloc(size) _test_malloc(size, __FILE__, __LINE__)
-#define test_calloc(num, size) _test_calloc(num, size, __FILE__, __LINE__)
-#define test_free(ptr) _test_free(ptr, __FILE__, __LINE__)
-
-// Redirect malloc, calloc and free to the unit test allocators.
-#if UNIT_TESTING
-#define malloc test_malloc
-#define calloc test_calloc
-#define free test_free
-#endif // UNIT_TESTING
-
-/*
- * Ensure mock_assert() is called. If mock_assert() is called the assert
- * expression string is returned.
- * For example: - * - * #define assert mock_assert - * - * void showmessage(const char *message) { - * assert(message); - * } - * - * int main(int argc, const char* argv[]) { - * expect_assert_failure(show_message(NULL)); - * printf("succeeded\n"); - * return 0; - * } - */ -#define expect_assert_failure(function_call) \ - { \ - const int expression = setjmp(global_expect_assert_env); \ - global_expecting_assert = 1; \ - if (expression) { \ - print_message("Expected assertion %s occurred\n", \ - *((const char**)&expression)); \ - global_expecting_assert = 0; \ - } else { \ - function_call ; \ - global_expecting_assert = 0; \ - print_error("Expected assert in %s\n", #function_call); \ - _fail(__FILE__, __LINE__); \ - } \ - } - -// Function prototype for setup, test and teardown functions. -typedef void (*UnitTestFunction)(void **state); - -// Function that determines whether a function parameter value is correct. -typedef int (*CheckParameterValue)(const LargestIntegralType value, - const LargestIntegralType check_value_data); - -// Type of the unit test function. -typedef enum UnitTestFunctionType { - UNIT_TEST_FUNCTION_TYPE_TEST = 0, - UNIT_TEST_FUNCTION_TYPE_SETUP, - UNIT_TEST_FUNCTION_TYPE_TEARDOWN, -} UnitTestFunctionType; - -/* Stores a unit test function with its name and type. - * NOTE: Every setup function must be paired with a teardown function. It's - * possible to specify NULL function pointers. - */ -typedef struct UnitTest { - const char* name; - UnitTestFunction function; - UnitTestFunctionType function_type; -} UnitTest; - - -// Location within some source code. -typedef struct SourceLocation { - const char* file; - int line; -} SourceLocation; - -// Event that's called to check a parameter value. -typedef struct CheckParameterEvent { - SourceLocation location; - const char *parameter_name; - CheckParameterValue check_value; - LargestIntegralType check_value_data; -} CheckParameterEvent; - -// Used by expect_assert_failure() and mock_assert(). -extern int global_expecting_assert; -extern jmp_buf global_expect_assert_env; - -// Retrieves a value for the given function, as set by "will_return". 
-LargestIntegralType _mock(const char * const function, const char* const file, - const int line); - -void _expect_check( - const char* const function, const char* const parameter, - const char* const file, const int line, - const CheckParameterValue check_function, - const LargestIntegralType check_data, CheckParameterEvent * const event, - const int count); - -void _expect_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType values[], - const size_t number_of_values, const int count); -void _expect_not_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType values[], - const size_t number_of_values, const int count); - -void _expect_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, - const LargestIntegralType maximum, const int count); -void _expect_not_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, - const LargestIntegralType maximum, const int count); - -void _expect_value( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType value, - const int count); -void _expect_not_value( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType value, - const int count); - -void _expect_string( - const char* const function, const char* const parameter, - const char* const file, const int line, const char* string, - const int count); -void _expect_not_string( - const char* const function, const char* const parameter, - const char* const file, const int line, const char* string, - const int count); - -void _expect_memory( - const char* const function, const char* const parameter, - const char* const file, const int line, const void* const memory, - const size_t size, const int count); -void _expect_not_memory( - const char* const function, const char* const parameter, - const char* const file, const int line, const void* const memory, - const size_t size, const int count); - -void _expect_any( - const char* const function, const char* const parameter, - const char* const file, const int line, const int count); - -void _check_expected( - const char * const function_name, const char * const parameter_name, - const char* file, const int line, const LargestIntegralType value); - -// Can be used to replace assert in tested code so that in conjuction with -// check_assert() it's possible to determine whether an assert condition has -// failed without stopping a test. 
-void mock_assert(const int result, const char* const expression, - const char * const file, const int line); - -void _will_return(const char * const function_name, const char * const file, - const int line, const LargestIntegralType value, - const int count); -void _assert_true(const LargestIntegralType result, - const char* const expression, - const char * const file, const int line); -void _assert_int_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line); -void _assert_int_not_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line); -void _assert_string_equal(const char * const a, const char * const b, - const char * const file, const int line); -void _assert_string_not_equal(const char * const a, const char * const b, - const char *file, const int line); -void _assert_memory_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line); -void _assert_memory_not_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line); -void _assert_in_range( - const LargestIntegralType value, const LargestIntegralType minimum, - const LargestIntegralType maximum, const char* const file, const int line); -void _assert_not_in_range( - const LargestIntegralType value, const LargestIntegralType minimum, - const LargestIntegralType maximum, const char* const file, const int line); -void _assert_in_set( - const LargestIntegralType value, const LargestIntegralType values[], - const size_t number_of_values, const char* const file, const int line); -void _assert_not_in_set( - const LargestIntegralType value, const LargestIntegralType values[], - const size_t number_of_values, const char* const file, const int line); - -void* _test_malloc(const size_t size, const char* file, const int line); -void* _test_calloc(const size_t number_of_elements, const size_t size, - const char* file, const int line); -void _test_free(void* const ptr, const char* file, const int line); - -void _fail(const char * const file, const int line); -int _run_test( - const char * const function_name, const UnitTestFunction Function, - void ** const state, const UnitTestFunctionType function_type, - const void* const heap_check_point); -int _run_tests(const UnitTest * const tests, const size_t number_of_tests); - -// Standard output and error print methods. 
-void print_message(const char* const format, ...); -void print_error(const char* const format, ...); -void vprint_message(const char* const format, va_list args); -void vprint_error(const char* const format, va_list args); - -#endif // CMOCKERY_H_ diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 589554f9..98d8d4d5 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -1,9 +1,9 @@ #include #include #include +#include #include "rangeset.h" -#include "cmockery.h" /* for "print" functions */ #include "debug_print.c" @@ -30,18 +30,18 @@ int main(void) { /* Array of test functions */ - const struct UnitTest tests[] = + const struct CMUnitTest tests[] = { - unit_test(test_irange_basic), - unit_test(test_irange_list_union_merge), - unit_test(test_irange_list_union_lossy_cov), - unit_test(test_irange_list_union_complete_cov), - unit_test(test_irange_list_union_intersecting), - unit_test(test_irange_list_intersection), + cmocka_unit_test(test_irange_basic), + cmocka_unit_test(test_irange_list_union_merge), + cmocka_unit_test(test_irange_list_union_lossy_cov), + cmocka_unit_test(test_irange_list_union_complete_cov), + cmocka_unit_test(test_irange_list_union_intersecting), + cmocka_unit_test(test_irange_list_intersection), }; /* Run series of tests */ - return run_tests(tests); + return cmocka_run_group_tests(tests, NULL, NULL); } /* From 2092dffaff7ec5171e9d4a184ef27afdf5fcbc9d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 10 Jul 2017 17:49:38 +0300 Subject: [PATCH 067/528] make Codecov work in Docker --- .travis.yml | 13 +++++-------- Makefile | 2 +- run_tests.sh | 18 ++++++++++++++---- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/.travis.yml b/.travis.yml index 576d9efb..cf4a4fec 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ os: - - linux + - linux sudo: required dist: trusty @@ -10,22 +10,19 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: - - docker-compose run tests + - docker-compose run tests $(bash <(curl -s https://codecov.io/env)) env: - PG_VERSION=10 CHECK_CODE=clang - PG_VERSION=9.6 CHECK_CODE=clang - PG_VERSION=9.5 CHECK_CODE=clang - PG_VERSION=10 CHECK_CODE=cppcheck - - PG_VERSION=10 CHECK_CODE=false - PG_VERSION=9.6 CHECK_CODE=cppcheck - - PG_VERSION=9.6 CHECK_CODE=false - PG_VERSION=9.5 CHECK_CODE=cppcheck + - PG_VERSION=10 CHECK_CODE=false + - PG_VERSION=9.6 CHECK_CODE=false - PG_VERSION=9.5 CHECK_CODE=false - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/Makefile b/Makefile index cec002ce..2de0874d 100644 --- a/Makefile +++ b/Makefile @@ -83,4 +83,4 @@ python_tests: $(MAKE) -C tests/python partitioning_tests cmocka_tests: - $(MAKE) -C tests/cmocka check + $(MAKE) -C tests/cmocka clean check diff --git a/run_tests.sh b/run_tests.sh index d41e053e..5859fefd 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,5 +1,11 @@ #!/bin/bash +# This is a main testing script for: +# * regression tests +# * testgres-based tests +# * cmocka-based tests +# Copyright (c) 2017, Postgres Professional + set -eux echo CHECK_CODE=$CHECK_CODE @@ -12,7 +18,8 @@ if [ "$CHECK_CODE" = "clang" ]; then exit $status elif [ "$CHECK_CODE" = "cppcheck" ]; then - cppcheck --template "{file} ({line}): {severity} 
({id}): {message}" \ + cppcheck \ + --template "{file} ({line}): {severity} ({id}): {message}" \ --enable=warning,portability,performance \ --suppress=redundantAssignment \ --suppress=uselessAssignmentPtrArg \ @@ -34,8 +41,8 @@ make USE_PGXS=1 clean # initialize database initdb -# build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 CFLAGS_SL="$(pg_config --cflags_sl) -coverage" +# build pg_pathman (using PG_CPPFLAGS for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" make USE_PGXS=1 install # check build @@ -69,7 +76,10 @@ if [ $status -ne 0 ]; then exit $status; fi rm -f tests/cmocka/*.gcno rm -f tests/cmocka/*.gcda -#generate *.gcov files +# generate *.gcov files gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h +# send coverage stats to Coveralls +bash <(curl -s https://codecov.io/bash) + exit $status From 53bb75252972a6e6910170db4578af1c7c6541e5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 00:02:04 +0300 Subject: [PATCH 068/528] show postgres server log if startup failed --- run_tests.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/run_tests.sh b/run_tests.sh index 5859fefd..a84c4dea 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -54,6 +54,10 @@ echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf echo "port = 55435" >> $PGDATA/postgresql.conf pg_ctl start -l /tmp/postgres.log -w +# check startup +status=$? +if [ $status -ne 0 ]; then cat /tmp/postgres.log; fi + # run regression tests PGPORT=55435 make USE_PGXS=1 installcheck || status=$? From 0c9e29269590832d17d1949d38dc84c6fd160bd0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 01:01:35 +0300 Subject: [PATCH 069/528] make use of SHLIB_LINK --- run_tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index a84c4dea..353033e4 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -41,8 +41,8 @@ make USE_PGXS=1 clean # initialize database initdb -# build pg_pathman (using PG_CPPFLAGS for gcov) -make USE_PGXS=1 PG_CPPFLAGS="-coverage" +# build pg_pathman (using PG_CPPFLAGS and SHLIB_LINK for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" make USE_PGXS=1 install # check build From ecd13bca16462594a14b3dc6fca4d291bd7d219f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 01:15:41 +0300 Subject: [PATCH 070/528] Travis CI: install curl for Codecov --- Dockerfile.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index bd78ba02..31358464 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -13,7 +13,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ RUN if [ "${CHECK_CODE}" = "false" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add python3 gcc make musl-dev cmocka-dev;\ + apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\ pip3 install testgres; \ fi From a8b197f107cf0257c3ef37b50f870ac25eaabf45 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 01:27:40 +0300 Subject: [PATCH 071/528] fix .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index cf4a4fec..1c7d2bc8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ install: - docker-compose build script: - - docker-compose run tests $(bash <(curl -s https://codecov.io/env)) + - docker-compose run $(bash <(curl -s 
https://codecov.io/env)) tests env: - PG_VERSION=10 CHECK_CODE=clang From 2bc4adaa9ea2a65e9b7b7da3e0b67dea692ed162 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Jul 2017 16:57:10 +0300 Subject: [PATCH 072/528] add new test (test_irange_change_lossiness) --- tests/cmocka/rangeset_tests.c | 72 ++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 98d8d4d5..1f700bc3 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -16,6 +16,7 @@ */ static void test_irange_basic(void **state); +static void test_irange_change_lossiness(void **state); static void test_irange_list_union_merge(void **state); static void test_irange_list_union_lossy_cov(void **state); @@ -33,6 +34,7 @@ main(void) const struct CMUnitTest tests[] = { cmocka_unit_test(test_irange_basic), + cmocka_unit_test(test_irange_change_lossiness), cmocka_unit_test(test_irange_list_union_merge), cmocka_unit_test(test_irange_list_union_lossy_cov), cmocka_unit_test(test_irange_list_union_complete_cov), @@ -75,10 +77,76 @@ test_irange_basic(void **state) assert_true(is_irange_valid(irange)); /* test allocation */ - irange_list = NIL; - irange_list = lappend_irange(irange_list, irange); + irange = make_irange(100, 200, IR_LOSSY); + irange_list = lappend_irange(NIL, irange); assert_memory_equal(&irange, &linitial_irange(irange_list), sizeof(IndexRange)); assert_memory_equal(&irange, &llast_irange(irange_list), sizeof(IndexRange)); + + /* test length */ + irange_list = NIL; + assert_int_equal(irange_list_length(irange_list), 0); + irange_list = lappend_irange(irange_list, make_irange(10, 20, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 11); + irange_list = lappend_irange(irange_list, make_irange(21, 30, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 21); +} + + +/* Test lossiness switcher */ +static void +test_irange_change_lossiness(void **state) +{ + List *irange_list; + + /* test lossiness change (NIL) */ + irange_list = irange_list_set_lossiness(NIL, IR_LOSSY); + assert_ptr_equal(irange_list, NIL); + irange_list = irange_list_set_lossiness(NIL, IR_COMPLETE); + assert_ptr_equal(irange_list, NIL); + + /* test lossiness change (no-op) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-20]L"); + + /* test lossiness change (no-op) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[30-40]C"); + + /* test lossiness change (single element) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-20]C"); + + /* test lossiness change (single element) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[30-40]L"); + + /* test lossiness change (multiple elements, adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); 
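+	/*
+	 * NOTE: [10-20] and [21-40] are adjacent, so once they share the same
+	 * lossiness they should be merged into a single IndexRange [10-40].
+	 */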
+ assert_string_equal(rangeset_print(irange_list), "[10-40]C"); + + /* test lossiness change (multiple elements, adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-40]L"); + + /* test lossiness change (multiple elements, non-adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-15]C, [21-40]C"); + + /* test lossiness change (multiple elements, non-adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-15]L, [21-40]L"); } From 6288849ef76c465bf7741e19e10ceeabb321e7e1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Jul 2017 18:38:09 +0300 Subject: [PATCH 073/528] add new rule to Makefile --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index 2de0874d..6796be3b 100644 --- a/Makefile +++ b/Makefile @@ -84,3 +84,9 @@ python_tests: cmocka_tests: $(MAKE) -C tests/cmocka clean check + +clean_gcov: + find . \ + -name "*.gcda" -delete -o \ + -name "*.gcno" -delete -o \ + -name "*.gcov" -delete From ea98072a0972a891c2df1aa036e4690b4ab51b30 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Jul 2017 22:24:06 +0300 Subject: [PATCH 074/528] Makefile: change rule cmocka_tests for Codecov --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6796be3b..2b69fd73 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,7 @@ python_tests: $(MAKE) -C tests/python partitioning_tests cmocka_tests: - $(MAKE) -C tests/cmocka clean check + $(MAKE) -C tests/cmocka check clean_gcov: find . 
\ From 215aa2e591a71ac9792a793e32cf21fa148ce1a8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Jul 2017 18:37:17 +0300 Subject: [PATCH 075/528] refactoring, extract function copy_rel_attributes() --- src/partition_creation.c | 121 +++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 57 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 669295c5..a0ea93ab 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -74,7 +74,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); -static void copy_relation_attributes(Oid partition_relid, Datum reloptions); +static void copy_rel_attributes(Oid parent_relid, Oid partition_relid); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Oid text_to_regprocedure(text *proname_args); @@ -672,9 +672,6 @@ create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace) { - HeapTuple tuple = NULL; - Relation parentrel; - /* Value to be returned */ Oid partition_relid = InvalidOid; /* safety */ @@ -695,7 +692,6 @@ create_single_partition_internal(Oid parent_relid, Oid save_userid; int save_sec_context; bool need_priv_escalation = !superuser(); /* we might be a SU */ - Datum reloptions = (Datum) 0; /* Lock parent and check if it exists */ LockRelationOid(parent_relid, ShareUpdateExclusiveLock); @@ -736,24 +732,6 @@ create_single_partition_internal(Oid parent_relid, /* Make up parent's RangeVar */ parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); - /* Copy attributes */ - parentrel = heap_open(parent_relid, NoLock); - newrel_rv->relpersistence = parentrel->rd_rel->relpersistence; - if (parentrel->rd_options) - { - bool isNull; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for relation %u", parent_relid); - - reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, - &isNull); - if (isNull) - reloptions = (Datum) 0; - } - heap_close(parentrel, NoLock); - /* If no 'tablespace' is provided, get parent's tablespace */ if (!tablespace) tablespace = get_tablespace_name(get_rel_tablespace(parent_relid)); @@ -804,8 +782,7 @@ create_single_partition_internal(Oid parent_relid, child_relowner).objectId; /* Copy attributes to partition */ - if (reloptions) - copy_relation_attributes(partition_relid, reloptions); + copy_rel_attributes(parent_relid, partition_relid); /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); @@ -843,9 +820,6 @@ create_single_partition_internal(Oid parent_relid, if (need_priv_escalation) SetUserIdAndSecContext(save_userid, save_sec_context); - if (tuple != NULL) - ReleaseSysCache(tuple); - return partition_relid; } @@ -1104,7 +1078,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) heap_close(pg_attribute_rel, RowExclusiveLock); } -/* Copy foreign keys of parent table */ +/* Copy foreign keys of parent table (updates pg_class) */ static void copy_foreign_keys(Oid parent_relid, Oid partition_oid) { @@ -1135,38 +1109,71 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Invoke the callback */ FunctionCallInvoke(©_fkeys_proc_fcinfo); + + /* Make changes visible */ + CommandCounterIncrement(); } -/* Copy attributes to partition. 
Updates partition's tuple in pg_class */ +/* Copy reloptions of foreign table (updates pg_class) */ static void -copy_relation_attributes(Oid partition_relid, Datum reloptions) +copy_rel_attributes(Oid parent_relid, Oid partition_relid) { - Relation classRel; - HeapTuple tuple, - newtuple; - Datum new_val[Natts_pg_class]; - bool new_null[Natts_pg_class], - new_repl[Natts_pg_class]; - - classRel = heap_open(RelationRelationId, RowExclusiveLock); - tuple = SearchSysCacheCopy1(RELOID, - ObjectIdGetDatum(partition_relid)); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for relation %u", - partition_relid); - - /* Fill in relpartbound value */ - memset(new_val, 0, sizeof(new_val)); - memset(new_null, false, sizeof(new_null)); - memset(new_repl, false, sizeof(new_repl)); - new_val[Anum_pg_class_reloptions - 1] = reloptions; - new_null[Anum_pg_class_reloptions - 1] = false; - new_repl[Anum_pg_class_reloptions - 1] = true; - newtuple = heap_modify_tuple(tuple, RelationGetDescr(classRel), - new_val, new_null, new_repl); - CatalogTupleUpdate(classRel, &newtuple->t_self, newtuple); - heap_freetuple(newtuple); - heap_close(classRel, RowExclusiveLock); + Relation pg_class_rel; + + HeapTuple parent_htup, + partition_htup, + new_htup; + + Datum reloptions; + bool reloptions_null; + Datum relpersistence; + + Datum values[Natts_pg_class]; + bool isnull[Natts_pg_class], + replace[Natts_pg_class] = { false }; + + pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); + + parent_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); + partition_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(partition_relid)); + + if (!HeapTupleIsValid(parent_htup)) + elog(ERROR, "cache lookup failed for relation %u", parent_relid); + + if (!HeapTupleIsValid(partition_htup)) + elog(ERROR, "cache lookup failed for relation %u", partition_relid); + + /* Extract parent's reloptions */ + reloptions = SysCacheGetAttr(RELOID, parent_htup, + Anum_pg_class_reloptions, + &reloptions_null); + + /* Extract parent's relpersistence */ + relpersistence = ((Form_pg_class) GETSTRUCT(parent_htup))->relpersistence; + + /* Fill in reloptions */ + values[Anum_pg_class_reloptions - 1] = reloptions; + isnull[Anum_pg_class_reloptions - 1] = reloptions_null; + replace[Anum_pg_class_reloptions - 1] = true; + + /* Fill in relpersistence */ + values[Anum_pg_class_relpersistence - 1] = relpersistence; + isnull[Anum_pg_class_relpersistence - 1] = false; + replace[Anum_pg_class_relpersistence - 1] = true; + + new_htup = heap_modify_tuple(partition_htup, + RelationGetDescr(pg_class_rel), + values, isnull, replace); + CatalogTupleUpdate(pg_class_rel, &new_htup->t_self, new_htup); + heap_freetuple(new_htup); + + ReleaseSysCache(parent_htup); + ReleaseSysCache(partition_htup); + + heap_close(pg_class_rel, RowExclusiveLock); + + /* Make changes visible */ + CommandCounterIncrement(); } From 5afbc305bb0e14f477f22aa0eb03bb308a24a578 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Jul 2017 18:43:38 +0300 Subject: [PATCH 076/528] minor fixes (free tuples etc) --- src/init.c | 1 + src/partition_creation.c | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/init.c b/src/init.c index 3219b1fa..e1a1b5bf 100644 --- a/src/init.c +++ b/src/init.c @@ -706,6 +706,7 @@ pathman_config_invalidate_parsed_expression(Oid relid) /* Form new tuple and perform an update */ new_htup = heap_form_tuple(RelationGetDescr(rel), values, nulls); CatalogTupleUpdate(rel, &iptr, new_htup); + heap_freetuple(new_htup); 
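+	/* the catalog now stores its own copy of the tuple, so ours can be freed */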
heap_close(rel, RowExclusiveLock); } diff --git a/src/partition_creation.c b/src/partition_creation.c index a0ea93ab..53837ee1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -681,8 +681,7 @@ create_single_partition_internal(Oid parent_relid, *parent_nsp_name; /* Elements of the "CREATE TABLE" query tree */ - RangeVar *parent_rv, - *newrel_rv = copyObject(partition_rv); + RangeVar *parent_rv; TableLikeClause like_clause; CreateStmt create_stmt; List *create_stmts; @@ -745,7 +744,7 @@ create_single_partition_internal(Oid parent_relid, /* Initialize CreateStmt structure */ NodeSetTag(&create_stmt, T_CreateStmt); - create_stmt.relation = newrel_rv; + create_stmt.relation = copyObject(partition_rv); create_stmt.tableElts = list_make1(copyObject(&like_clause)); create_stmt.inhRelations = list_make1(copyObject(parent_rv)); create_stmt.ofTypename = NULL; From 44e180d2d2976acb1fad74ca4ab7893fde12f18c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Jul 2017 18:45:57 +0300 Subject: [PATCH 077/528] rename copy_rel_attributes() -> copy_rel_options() --- src/partition_creation.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 53837ee1..3d64d676 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -74,7 +74,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); -static void copy_rel_attributes(Oid parent_relid, Oid partition_relid); +static void copy_rel_options(Oid parent_relid, Oid partition_relid); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Oid text_to_regprocedure(text *proname_args); @@ -781,7 +781,7 @@ create_single_partition_internal(Oid parent_relid, child_relowner).objectId; /* Copy attributes to partition */ - copy_rel_attributes(parent_relid, partition_relid); + copy_rel_options(parent_relid, partition_relid); /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); @@ -1115,7 +1115,7 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Copy reloptions of foreign table (updates pg_class) */ static void -copy_rel_attributes(Oid parent_relid, Oid partition_relid) +copy_rel_options(Oid parent_relid, Oid partition_relid) { Relation pg_class_rel; From 5d34026d723407230a7a27f4d51d61acedd5cc7a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:16:34 +0300 Subject: [PATCH 078/528] Add first version of script that creates docker containers and uploads them to docker hub --- .travis.yml | 12 ++---------- Dockerfile.tmpl | 10 +++++----- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1c7d2bc8..86c6a175 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,19 +10,11 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - echo "FROM ${DOCKER_IMAGE}" > Dockerfile - docker-compose build script: - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests env: - - PG_VERSION=10 CHECK_CODE=clang - - PG_VERSION=9.6 CHECK_CODE=clang - - PG_VERSION=9.5 CHECK_CODE=clang - - PG_VERSION=10 CHECK_CODE=cppcheck - - PG_VERSION=9.6 CHECK_CODE=cppcheck - - PG_VERSION=9.5 CHECK_CODE=cppcheck - - PG_VERSION=10 CHECK_CODE=false - - PG_VERSION=9.6 CHECK_CODE=false - - PG_VERSION=9.5 CHECK_CODE=false + - 
DOCKER_IMAGE=pg95_clang_check_code diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 31358464..c7b0fab5 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -23,8 +23,8 @@ RUN mkdir -p /pg/data && \ chmod a+rwx /usr/local/lib/postgresql && \ chmod a+rwx /usr/local/share/postgresql/extension -ADD . /pg/pg_pathman -WORKDIR /pg/pg_pathman -RUN chmod -R go+rwX /pg/pg_pathman -USER postgres -ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ONBUILD ADD . /pg/pg_pathman +ONBUILD WORKDIR /pg/pg_pathman +ONBUILD RUN chmod -R go+rwX /pg/pg_pathman +ONBUILD USER postgres +ONBUILD ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh From 22f6d4d38fa08090c1708038d9298c5ea547cefb Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:16:51 +0300 Subject: [PATCH 079/528] Add forgotten file --- make_images.py | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100755 make_images.py diff --git a/make_images.py b/make_images.py new file mode 100755 index 00000000..e2968f2a --- /dev/null +++ b/make_images.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +import subprocess + +DOCKER_ID = 'pathman' +pg_versions = ['9.5','9.6','10'] + +image_types = { + 'clang_check_code': { + 'CHECK_CODE': 'clang', + }, + 'cppcheck': { + 'CHECK_CODE': 'cppcheck', + }, + 'pathman_tests': { + 'CHECK_CODE': 'false', + } +} + +stopline = '###STOP' + +password = input("Enter password for `docker login`: ") +subprocess.check_output([ + 'docker', + 'login', + '-u', DOCKER_ID, + '-p', password]) + +for pg_version in pg_versions: + pgname = 'pg%s' % pg_version.replace('.', '') + for key, variables in image_types.items(): + image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) + with open('Dockerfile', 'w') as out: + with open('Dockerfile.tmpl', 'r') as f: + for line in f: + if line.startswith(stopline): + break + + line = line + line = line.replace('${PG_VERSION}', pg_version) + for key, value in variables.items(): + varname = '${%s}' % key + line = line.replace(varname, value) + + out.write(line) + + args = [ + 'docker', + 'build', + '-t', image_name, + '.' 
+ ] + subprocess.check_output(args, stderr=subprocess.STDOUT) + print("build ok:", image_name) + subprocess.check_output(['docker', 'push', image_name], + stderr=subprocess.STDOUT) + print("upload ok:", image_name) + exit() From 3a8bcf873083cd086da648178b885cc20b5b7131 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:19:12 +0300 Subject: [PATCH 080/528] Fix container name in tests --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 86c6a175..b04085ec 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,4 +17,4 @@ script: - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests env: - - DOCKER_IMAGE=pg95_clang_check_code + - DOCKER_IMAGE=pathman/pg95_clang_check_code From f633bce04e5802a566f0d30a6a242e095c4fea59 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:55:59 +0300 Subject: [PATCH 081/528] Add all images to travis --- .travis.yml | 8 ++++++++ make_images.py | 15 +++++++-------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index b04085ec..cd41b73e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,3 +18,11 @@ script: env: - DOCKER_IMAGE=pathman/pg95_clang_check_code + - DOCKER_IMAGE=pathman/pg95_cppcheck + - DOCKER_IMAGE=pathman/pg95_pathman_tests + - DOCKER_IMAGE=pathman/pg96_clang_check_code + - DOCKER_IMAGE=pathman/pg96_cppcheck + - DOCKER_IMAGE=pathman/pg96_pathman_tests + - DOCKER_IMAGE=pathman/pg10_clang_check_code + - DOCKER_IMAGE=pathman/pg10_cppcheck + - DOCKER_IMAGE=pathman/pg10_pathman_tests diff --git a/make_images.py b/make_images.py index e2968f2a..77a49fac 100755 --- a/make_images.py +++ b/make_images.py @@ -17,15 +17,16 @@ } } -stopline = '###STOP' - -password = input("Enter password for `docker login`: ") +password = input("Enter password for `docker login` for user `%s`: " % DOCKER_ID) subprocess.check_output([ 'docker', 'login', '-u', DOCKER_ID, '-p', password]) +travis_conf_line = '- DOCKER_IMAGE=%s' +travis_conf = [] + for pg_version in pg_versions: pgname = 'pg%s' % pg_version.replace('.', '') for key, variables in image_types.items(): @@ -33,10 +34,6 @@ with open('Dockerfile', 'w') as out: with open('Dockerfile.tmpl', 'r') as f: for line in f: - if line.startswith(stopline): - break - - line = line line = line.replace('${PG_VERSION}', pg_version) for key, value in variables.items(): varname = '${%s}' % key @@ -55,4 +52,6 @@ subprocess.check_output(['docker', 'push', image_name], stderr=subprocess.STDOUT) print("upload ok:", image_name) - exit() + travis_conf.append(travis_conf_line % image_name) + +print('\n'.join(travis_conf)) From 64b78a54adbab3c0b9a56dcee8267f80d3e8e12b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 15:04:07 +0300 Subject: [PATCH 082/528] Change make_images output a little bit --- make_images.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/make_images.py b/make_images.py index 77a49fac..6859cd3c 100755 --- a/make_images.py +++ b/make_images.py @@ -26,6 +26,7 @@ travis_conf_line = '- DOCKER_IMAGE=%s' travis_conf = [] +print("") for pg_version in pg_versions: pgname = 'pg%s' % pg_version.replace('.', '') @@ -54,4 +55,5 @@ print("upload ok:", image_name) travis_conf.append(travis_conf_line % image_name) +print("\ntravis configuration") print('\n'.join(travis_conf)) From 5d91fc55a755b0a47fdc30382f43398bcae86821 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 20 Jul 2017 14:51:09 +0300 Subject: [PATCH 083/528] Advice to configure pathman_ddl_trigger 
ENABLE ALWAYS on LR replica
---
 README.md | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/README.md b/README.md
index d53ad374..3b37273f 100644
--- a/README.md
+++ b/README.md
@@ -251,6 +251,25 @@ drop_partitions(parent REGCLASS,
 ```
 Drop partitions of the `parent` table (both foreign and local relations). If `delete_data` is `false`, the data is copied to the parent table first. Default is `false`.
 
+To fully remove a partitioned table along with all of its partitions, use the
+conventional `DROP TABLE relation CASCADE`. However, care should be taken in the
+somewhat rare case when you are running logical replication and the `DROP` was
+executed by a replication apply worker, e.g. via a trigger on a replicated
+table. `pg_pathman` uses the `pathman_ddl_trigger` event trigger to remove the
+record about the dropped table from `pathman_config`, and by default this
+trigger won't fire on a replica, leading to an inconsistent state in which
+`pg_pathman` thinks that the table still exists while in fact it doesn't. If
+this is the case, configure the trigger to fire on the replica too:
+
+```plpgsql
+ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE ALWAYS;
+```
+
+Physical replication doesn't have this problem, since DDL and the
+`pathman_config` table are replicated as well; master and slave PostgreSQL
+instances are basically identical, and keeping this trigger in `ALWAYS` mode
+there would only do harm.
+
 
 ### Additional parameters
 

From 215234e5ca1d8bf56844ae45403d8f5839986bdb Mon Sep 17 00:00:00 2001
From: Ildus K
Date: Fri, 21 Jul 2017 13:20:59 +0300
Subject: [PATCH 084/528] Ask user login in make_images script. Remove testgres
 installation from container

---
 Dockerfile.tmpl | 1 -
 make_images.py  | 6 ++++--
 run_tests.sh    | 4 ++++
 3 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl
index c7b0fab5..a1c662c8 100644
--- a/Dockerfile.tmpl
+++ b/Dockerfile.tmpl
@@ -14,7 +14,6 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \
 RUN if [ "${CHECK_CODE}" = "false" ] ; then \
 	echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \
 	apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\
-	pip3 install testgres; \
 	fi
diff --git a/make_images.py b/make_images.py
index 6859cd3c..b63b3fbe 100755
--- a/make_images.py
+++ b/make_images.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
 import subprocess
+import getpass
 
 DOCKER_ID = 'pathman'
 pg_versions = ['9.5','9.6','10']
@@ -17,15 +18,12 @@
     }
 }
 
-password = input("Enter password for `docker login` for user `%s`: " % DOCKER_ID)
+user = input("Enter username for `docker login`: ")
+password = getpass.getpass()
 subprocess.check_output([
     'docker',
     'login',
-    '-u', DOCKER_ID,
+    '-u', user,
     '-p', password])
diff --git a/run_tests.sh b/run_tests.sh
index 353033e4..0bb36105 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -35,6 +35,10 @@ elif [ "$CHECK_CODE" = "cppcheck" ]; then
 	exit $status
 fi
 
+# we need testgres for pathman tests
+pip3 install testgres
+pip3 freeze | grep testgres
+
 # don't forget to "make clean"
 make USE_PGXS=1 clean

From 9a693d4f3b10b792aa0c64475be3d07f8504ce4c Mon Sep 17 00:00:00 2001
From: Ildus K
Date: Fri, 21 Jul 2017 13:25:57 +0300
Subject: [PATCH 085/528] Remove printlog function

---
 tests/python/partitioning_test.py | 299 ++++++++++++++----------------
 1 file changed, 141 insertions(+), 158 deletions(-)

diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py
index 0d05c458..6d79dd96 100755
--- 
a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -81,115 +81,102 @@ def catchup_replica(self, master, replica): % replica.name master.poll_query_until('postgres', wait_lsn_query) - def printlog(self, logfile): - with open(logfile, 'r') as log: - for line in log.readlines(): - print(line) - def test_concurrent(self): """Tests concurrent partitioning""" - try: - node = self.start_new_pathman_cluster() - self.init_test_data(node) - node.psql( + node = self.start_new_pathman_cluster() + self.init_test_data(node) + + node.psql( + 'postgres', + 'select partition_table_concurrently(\'abc\')') + + while True: + # update some rows to check for deadlocks + node.safe_psql( 'postgres', - 'select partition_table_concurrently(\'abc\')') + ''' + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + ''') - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e + count = node.execute( + 'postgres', + 'select count(*) from pathman_concurrent_part_tasks') + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + + node.stop() def test_replication(self): """Tests how pg_pathman works with replication""" node = get_new_node('master') replica = get_new_node('repl') - try: # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs 
off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - self.printlog(replica.logs_dir + '/postgresql.log') - raise e + node = self.start_new_pathman_cluster(allows_streaming=True) + node.backup('my_backup') + + # initialize replica from backup + replica.init_from_backup(node, 'my_backup', has_streaming=True) + replica.start() + + # initialize pg_pathman extension and some test data + self.init_test_data(node) + + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc') + ) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], + 300000 + ) + + # check that direct UPDATE in pathman_config_params invalidates + # cache + node.psql( + 'postgres', + 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc') + ) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], + 0 + ) def test_locks(self): """Test that a session trying to create new partitions waits for other @@ -225,71 +212,67 @@ def add_partition(node, flag, query): # Initialize master server node = get_new_node('master') - try: - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.safe_psql( + 'postgres', + 'create extension pg_pathman; ' + + 'create table abc(id serial, t text); ' + + 'insert into abc select generate_series(1, 100000); ' + + 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' + ) + + # Start transaction that will create partition + con = node.connect() + con.begin() + con.execute('select 
append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions and wait some + # time + query = [ + 'select prepend_range_partition(\'abc\')', + 'select append_range_partition(\'abc\')', + 'select add_range_partition(\'abc\', 500000, 550000)', + ] + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, + args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) + self.assertEqual(flags[i].get(), False) - # Commit transaction. Since then other sessions can create - # partitions - con.commit() + # Commit transaction. Since then other sessions can create + # partitions + con.commit() - # Now wait until each thread finishes - for thread in threads: - thread.join() + # Now wait until each thread finishes + for thread in threads: + thread.join() - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' + ), + b'6\n' + ) def test_tablespace(self): """Check tablespace support""" From 1126cc2cc84e6bce495861ef751824be6ff24436 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Jul 2017 13:35:40 +0300 Subject: [PATCH 086/528] Use virtualenv in tests --- .gitignore | 1 + Dockerfile.tmpl | 1 + run_tests.sh | 2 ++ 3 files changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 3eb50e54..06aa9a65 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ pg_pathman--*.sql tags cscope* Dockerfile +testgres diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index a1c662c8..e5663156 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -14,6 +14,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ RUN if [ "${CHECK_CODE}" = "false" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\ + pip3 install virtualenv;\ fi RUN mkdir -p /pg/data && \ diff --git a/run_tests.sh b/run_tests.sh index 0bb36105..5074be7a 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -36,6 +36,8 @@ elif [ "$CHECK_CODE" = "cppcheck" ]; then fi # we need testgres for pathman tests +virtualenv env +source env/bin/activate pip3 install testgres pip3 freeze | grep testgres From 63dccb6b6d7b2d7940e16710326ef98ea12a5a3f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Jul 2017 13:42:00 +0300 Subject: [PATCH 087/528] Fix error in virtualenv activation for tests --- run_tests.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/run_tests.sh b/run_tests.sh index 
5074be7a..1b9d7a70 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -37,6 +37,7 @@ fi # we need testgres for pathman tests virtualenv env +export VIRTUAL_ENV_DISABLE_PROMPT=1 source env/bin/activate pip3 install testgres pip3 freeze | grep testgres From c85f96af05bf0648d5dde080e8a882efb917da38 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Jul 2017 20:41:11 +0300 Subject: [PATCH 088/528] reformat + fix python-based tests --- tests/python/.style.yapf | 5 + tests/python/partitioning_test.py | 2111 +++++++++++++++-------------- 2 files changed, 1075 insertions(+), 1041 deletions(-) create mode 100644 tests/python/.style.yapf diff --git a/tests/python/.style.yapf b/tests/python/.style.yapf new file mode 100644 index 00000000..e2ca7ba3 --- /dev/null +++ b/tests/python/.style.yapf @@ -0,0 +1,5 @@ +[style] +based_on_style = pep8 +spaces_before_comment = 4 +split_before_logical_operator = false +column_limit=90 diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 6d79dd96..52b96d87 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 # coding: utf-8 - """ - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries +partitioning_test.py + Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2017, Postgres Professional """ import unittest @@ -20,1051 +19,1081 @@ version = get_config().get("VERSION_NUM") + # Helper function for json equality def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper + """ To run tests with FDW support, set environment variable TEST_FDW=1 """ + def wrapper(*args, **kwargs): + if os.environ.get('FDW_DISABLED') != '1': + func(*args, **kwargs) + else: + print('Warning: FDW features tests are disabled, skipping...') -class PartitioningTests(unittest.TestCase): + return wrapper - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = 
\'%s\'' \ - % replica.name - else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) - - def test_concurrent(self): - """Tests concurrent partitioning""" - - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they are doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done its work - flags = [Flag(False) for i in range(3)] - - # All threads synchronize though this lock - lock = threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that 
this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - master.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 
'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - b'25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return - - # Prepare test database - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = 
json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select 
generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_merge_insert(self): - """Test concurrent merge_range_partitions() + INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations - - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") - - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 6: finish merge in con1 (success, unlock) - con1.commit() - - # Step 7: wait for con2 - t.join() - - rows = con1.execute("select *, tableoid::regclass::text from ins_test") - - # check number of rows in table - self.assertEqual(len(rows), 1) - - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) - - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') - - # Stop 
instance and finish work - node.stop() - node.cleanup() - - def test_pg_dump(self): - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. - """ - - import subprocess - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute('insert into range_partitioned select i from generate_series(1, 500) i') - con.execute('create table hash_partitioned (i integer not null)') - con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') - con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') - - # fillin child tables with remain data - con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') - con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') - - # turn off enable_parent option - con.execute('select set_enable_parent(\'range_partitioned\', false)') - con.execute('select set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - def cmp_full(con1, con2): - """Compare selection partitions in plan and contents in partitioned tables""" - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', - 'only range_partitioned', - 'hash_partitioned', - 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [x[0] for x in con1.execute(content_query % 
table_ref)] - content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', 'alter system set pg_pathman.override_copy to off') - node.psql('copy', 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, - turnon_pathman, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--inserts", - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--format=custom", - "initial"], - [node.get_bin_path("pg_restore"), - "-p {}".format(node.port), - "--dbname=copy"], - cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, - stdout=FNULL, stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the 
command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_concurrent_detach(self): - """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" - - # Init parameters - num_insert_workers = 8 - detach_timeout = 0.1 # time in sec between successive inserts and detachs - num_detachs = 100 # estimated number of detachs - inserts_advance = 1 # abvance in sec of inserts process under detachs - test_interval = int(math.ceil(detach_timeout * num_detachs)) - - insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" - detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" - - # Check pgbench scripts on existance - self.assertTrue(os.path.isfile(insert_pgbench_script), - msg="pgbench script with insert timestamp doesn't exist") - self.assertTrue(os.path.isfile(detach_pgbench_script), - msg="pgbench script with detach letfmost partition doesn't exist") - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute('create table ts_range_partitioned(ts timestamp not null)') - con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ - "-j", "%i" % num_insert_workers, - "-c", "%i" % num_insert_workers, - "-f", insert_pgbench_script, - "-T", "%i" % (test_interval+inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ - "-D", "timeout=%f" % detach_timeout, - "-f", detach_pgbench_script, - "-T", "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg="Race condition between detach and concurrent inserts with append partition is expired") - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() +class PartitioningTests(unittest.TestCase): + def setUp(self): + self.setup_cmd = [ + "create table abc(id serial, t text)", + "insert into abc select generate_series(1, 300000)", + "select create_hash_partitions('abc', 'id', 3, partition_data := false)", + ] + + def tearDown(self): + stop_all() + + def start_new_pathman_cluster(self, name='test', allows_streaming=False): + node = get_new_node(name) + node.init(allows_streaming=allows_streaming) + node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") + node.start() + node.psql('postgres', 'create extension pg_pathman') + return node + + def init_test_data(self, node): + """ Initialize pg_pathman extension and test data """ + for cmd in self.setup_cmd: + 
node.safe_psql('postgres', cmd)
+
+    def catchup_replica(self, master, replica):
+        """ Wait until replica synchronizes with master """
+        if version >= 100000:
+            wait_lsn_query = \
+                'SELECT pg_current_wal_lsn() <= replay_lsn ' \
+                'FROM pg_stat_replication WHERE application_name = \'%s\'' \
+                % replica.name
+        else:
+            wait_lsn_query = \
+                'SELECT pg_current_xlog_location() <= replay_location ' \
+                'FROM pg_stat_replication WHERE application_name = \'%s\'' \
+                % replica.name
+        master.poll_query_until('postgres', wait_lsn_query)
+
+    def test_concurrent(self):
+        """ Test concurrent partitioning """
+
+        node = self.start_new_pathman_cluster()
+        self.init_test_data(node)
+
+        node.psql('postgres', "select partition_table_concurrently('abc')")
+
+        while True:
+            # update some rows to check for deadlocks
+            node.safe_psql('postgres', """
+                update abc set t = 'test'
+                where id in (select (random() * 300000)::int
+                from generate_series(1, 3000))
+                """)
+
+            count = node.execute('postgres', """
+                select count(*) from pathman_concurrent_part_tasks
+                """)
+
+            # if there are no active workers, the work is done
+            if count[0][0] == 0:
+                break
+            time.sleep(1)
+
+        data = node.execute('postgres', 'select count(*) from only abc')
+        self.assertEqual(data[0][0], 0)
+        data = node.execute('postgres', 'select count(*) from abc')
+        self.assertEqual(data[0][0], 300000)
+
+        node.stop()
+
+    def test_replication(self):
+        """ Test how pg_pathman works with replication """
+
+        node = get_new_node('master')
+        replica = get_new_node('repl')
+
+        # initialize master server
+        node = self.start_new_pathman_cluster(allows_streaming=True)
+        node.backup('my_backup')
+
+        # initialize replica from backup
+        replica.init_from_backup(node, 'my_backup', has_streaming=True)
+        replica.start()
+
+        # initialize pg_pathman extension and some test data
+        self.init_test_data(node)
+
+        # wait until replica catches up
+        self.catchup_replica(node, replica)
+
+        # check that results are equal
+        self.assertEqual(
+            node.psql('postgres', 'explain (costs off) select * from abc'),
+            replica.psql('postgres', 'explain (costs off) select * from abc'))
+
+        # enable parent and see if it is enabled in replica
+        node.psql('postgres', 'select enable_parent(\'abc\')')
+
+        self.catchup_replica(node, replica)
+        self.assertEqual(
+            node.psql('postgres', 'explain (costs off) select * from abc'),
+            replica.psql('postgres', 'explain (costs off) select * from abc'))
+        self.assertEqual(
+            node.psql('postgres', 'select * from abc'),
+            replica.psql('postgres', 'select * from abc'))
+        self.assertEqual(
+            node.execute('postgres', 'select count(*) from abc')[0][0], 300000)
+
+        # check that UPDATE in pathman_config_params invalidates cache
+        node.psql('postgres', 'update pathman_config_params set enable_parent = false')
+        self.catchup_replica(node, replica)
+        self.assertEqual(
+            node.psql('postgres', 'explain (costs off) select * from abc'),
+            replica.psql('postgres', 'explain (costs off) select * from abc'))
+        self.assertEqual(
+            node.psql('postgres', 'select * from abc'),
+            replica.psql('postgres', 'select * from abc'))
+        self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0)
+
+    def test_locks(self):
+        """
+        Test that a session trying to create new partitions
+        waits for other sessions if they are doing the same
+        """
+
+        import threading
+        import time
+
+        class Flag:
+            def __init__(self, value):
+                self.flag = value
+
+            def set(self, value):
+                self.flag = value
+
+            def get(self):
+                return self.flag
+
+        # There is one flag for each thread, which shows whether it has done its work
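+        # (a note on the design: the worker below runs as a closure, and
+        # rebinding a plain boolean from a closure is not possible in
+        # Python 2, so a small mutable holder object is used instead)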
+        flags = [Flag(False) for i in range(3)]
+
+        # All threads synchronize through this lock
+        lock = threading.Lock()
+
+        # Define thread function
+        def add_partition(node, flag, query):
+            """
+            We expect that this query will wait until
+            another session commits or rolls back
+            """
+            node.safe_psql('postgres', query)
+            with lock:
+                flag.set(True)
+
+        # Initialize master server
+        node = get_new_node('master')
+
+        node.init()
+        node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n")
+        node.start()
+        node.safe_psql(
+            'postgres',
+            'create extension pg_pathman; ' + 'create table abc(id serial, t text); ' +
+            'insert into abc select generate_series(1, 100000); ' +
+            'select create_range_partitions(\'abc\', \'id\', 1, 50000);')
+
+        # Start transaction that will create partition
+        con = node.connect()
+        con.begin()
+        con.execute('select append_range_partition(\'abc\')')
+
+        # Start threads that are supposed to add new partitions,
+        # and wait for some time
+        query = [
+            'select prepend_range_partition(\'abc\')',
+            'select append_range_partition(\'abc\')',
+            'select add_range_partition(\'abc\', 500000, 550000)',
+        ]
+        threads = []
+        for i in range(3):
+            thread = threading.Thread(
+                target=add_partition, args=(node, flags[i], query[i]))
+            threads.append(thread)
+            thread.start()
+        time.sleep(3)
+
+        # These threads should wait until the current transaction finishes
+        with lock:
+            for i in range(3):
+                self.assertEqual(flags[i].get(), False)
+
+        # Commit the transaction. After that, other sessions
+        # can create partitions
+        con.commit()
+
+        # Now wait until each thread finishes
+        for thread in threads:
+            thread.join()
+
+        # Check flags; they should all be true, which means
+        # that the threads have finished
+        with lock:
+            for i in range(3):
+                self.assertEqual(flags[i].get(), True)
+
+        # Check that all partitions are created
+        self.assertEqual(
+            node.safe_psql(
+                'postgres',
+                'select count(*) from pg_inherits where inhparent=\'abc\'::regclass'),
+            b'6\n')
+
+    def test_tablespace(self):
+        """ Check tablespace support """
+
+        def check_tablespace(node, tablename, tablespace):
+            res = node.execute('postgres',
+                               'select get_tablespace(\'{}\')'.format(tablename))
+            if len(res) == 0:
+                return False
+
+            return res[0][0] == tablespace
+
+        node = get_new_node('master')
+        node.init()
+        node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n')
+        node.start()
+        node.psql('postgres', 'create extension pg_pathman')
+
+        # create tablespace
+        path = os.path.join(node.data_dir, 'test_space_location')
+        os.mkdir(path)
+        node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path))
+
+        # create table in this tablespace
+        node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space')
+
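+        # pg_pathman is expected to place new partitions in the parent
+        # table's tablespace unless another one is given explicitly;
+        # the checks below verify exactly that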
+        # create three partitions. Expect that they will be created in the
+        # same tablespace as the parent table
+        node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)')
+        self.assertTrue(check_tablespace(node, 'abc', 'test_space'))
+
+        # check tablespace for appended partition
+        node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')')
+        self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space'))
+
+        # check tablespace for prepended partition
+        node.psql('postgres',
+                  'select prepend_range_partition(\'abc\', \'abc_prepended\')')
+        self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space'))
+
+        # check tablespace for added partition
+        node.psql('postgres',
+                  'select add_range_partition(\'abc\', 41, 51, \'abc_added\')')
+        self.assertTrue(check_tablespace(node, 'abc_added', 'test_space'))
+
+        # check tablespace for split
+        node.psql('postgres',
+                  'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')')
+        self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space'))
+
+        # now let's specify tablespace explicitly
+        node.psql(
+            'postgres',
+            'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')')
+        node.psql(
+            'postgres',
+            'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')'
+        )
+        node.psql(
+            'postgres',
+            'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')'
+        )
+        node.psql(
+            'postgres',
+            'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')'
+        )
+
+        # yapf: disable
+        self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default'))
+        self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default'))
+        self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default'))
+        self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default'))
+
+    @if_fdw_enabled
+    def test_foreign_table(self):
+        """ Test foreign tables """
+
+        # Start master server
+        master = get_new_node('test')
+        master.init()
+        master.append_conf('postgresql.conf', """
+            shared_preload_libraries='pg_pathman, postgres_fdw'\n
+            """)
+        master.start()
+        master.psql('postgres', 'create extension pg_pathman')
+        master.psql('postgres', 'create extension postgres_fdw')
+
+        # RANGE partitioning test with FDW:
+        #   - create range partitioned table in master
+        #   - create foreign server
+        #   - create foreign table and insert some data into it
+        #   - attach foreign table to partitioned one
+        #   - try inserting data into foreign partition via parent
+        #   - drop partitions
+        master.psql('postgres', """
+            create table abc(id serial, name text);
+            select create_range_partitions('abc', 'id', 0, 10, 2)
+            """)
+
+        # Current user name (needed for user mapping)
+        username = master.execute('postgres', 'select current_user')[0][0]
+
+        # Start foreign server
+        fserv = get_new_node('fserv')
+        fserv.init().start()
+        fserv.safe_psql('postgres', "create table ftable(id serial, name text)")
+        fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')")
+
+        # Create foreign table and attach it to partitioned table
+        master.safe_psql('postgres', """
+            create server fserv
+            foreign data wrapper postgres_fdw
+            options (dbname 'postgres', host '127.0.0.1', port '{}')
+            """.format(fserv.port))
+
+        master.safe_psql('postgres', """
+            create user mapping for {0} server fserv
+            options (user '{0}')
+            """.format(username))
+
+        master.safe_psql('postgres', """
+            import foreign schema public limit to (ftable)
+            from server fserv into public
+            """)
+
+        master.safe_psql(
'postgres', + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql('postgres', """ + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('postgres', + 'create table f_hash_test(id serial, name text)') + + master.safe_psql('postgres', """ + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql('postgres', """ + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('postgres', + 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql('postgres', "select drop_partitions('hash_test')") + + @if_fdw_enabled + def test_parallel_nodes(self): + """ Test parallel queries under partitions """ + + import json + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < 90600: + return + + # Prepare test database + node.psql('postgres', 'create extension pg_pathman') + + node.psql('postgres', """ + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('postgres', 'vacuum analyze') + + node.psql('postgres', """ + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= 100000: + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": 
"Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select 
create_range_partitions('ins_test', 'val', 1, 10)")
+            con0.commit()
+
+        # Create two separate connections for this test
+        with node.connect() as con1, node.connect() as con2:
+
+            # Thread for connection #2 (it has to wait)
+            def con2_thread():
+                con2.execute('insert into ins_test values(51)')
+                con2.commit()
+
+            # Step 1: lock partitioned table in con1
+            con1.begin()
+            con1.execute('select count(*) from ins_test')  # load pathman's cache
+            con1.execute('lock table ins_test in share update exclusive mode')
+
+            # Step 2: try inserting new value in con2 (waiting)
+            con2.begin()
+            con2.execute('select count(*) from ins_test')  # load pathman's cache
+            t = threading.Thread(target=con2_thread)
+            t.start()
+
+            # Step 3: wait until 't' locks
+            while True:
+                with node.connect() as con0:
+                    locks = con0.execute("""
+                        select count(*) from pg_locks where granted = 'f'
+                        """)
+
+                    if int(locks[0][0]) > 0:
+                        break
+
+            # Step 4: try inserting new value in con1 (success, unlock)
+            con1.execute('insert into ins_test values(52)')
+            con1.commit()
+
+            # Step 5: wait for con2
+            t.join()
+
+            rows = con1.execute("""
+                select * from pathman_partition_list
+                where parent = 'ins_test'::regclass
+                order by range_min, range_max
+                """)
+
+            # check number of partitions
+            self.assertEqual(len(rows), 6)
+
+            # check range_max of partitions
+            self.assertEqual(int(rows[0][5]), 11)
+            self.assertEqual(int(rows[1][5]), 21)
+            self.assertEqual(int(rows[2][5]), 31)
+            self.assertEqual(int(rows[3][5]), 41)
+            self.assertEqual(int(rows[4][5]), 51)
+            self.assertEqual(int(rows[5][5]), 61)
+
+        # Stop instance and finish work
+        node.stop()
+        node.cleanup()
+
+    def test_conc_part_merge_insert(self):
+        """ Test concurrent merge_range_partitions() + INSERT """
+
+        # Create and start new instance
+        node = self.start_new_pathman_cluster(allows_streaming=False)
+
+        # Create table 'ins_test' and partition it
+        with node.connect() as con0:
+            # yapf: disable
+            con0.begin()
+            con0.execute("create table ins_test(val int not null)")
+            con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)")
+            con0.commit()
+
+        # Create two separate connections for this test
+        with node.connect() as con1, node.connect() as con2:
+
+            # Thread for connection #2 (it has to wait)
+            def con2_thread():
+                con2.begin()
+                con2.execute('insert into ins_test values(20)')
+                con2.commit()
+
+            # Step 1: initialize con1
+            con1.begin()
+            con1.execute('select count(*) from ins_test')  # load pathman's cache
+
+            # Step 2: initialize con2
+            con2.begin()
+            con2.execute('select count(*) from ins_test')  # load pathman's cache
+            con2.commit()  # unlock relations
+
+            # Step 3: merge 'ins_test_1' + 'ins_test_2' in con1 (success)
+            con1.execute(
+                "select merge_range_partitions('ins_test_1', 'ins_test_2')")
+
+            # Step 4: try inserting new value in con2 (waiting)
+            t = threading.Thread(target=con2_thread)
+            t.start()
+
+            # Step 5: wait until 't' locks
+            while True:
+                with node.connect() as con0:
+                    locks = con0.execute("""
+                        select count(*) from pg_locks where granted = 'f'
+                        """)
+
+                    if int(locks[0][0]) > 0:
+                        break
+
+            # Step 6: finish merge in con1 (success, unlock)
+            con1.commit()
+
+            # Step 7: wait for con2
+            t.join()
+
+            rows = con1.execute("select *, tableoid::regclass::text from ins_test")
+
+            # check number of rows in table
+            self.assertEqual(len(rows), 1)
+
+            # check value that has been inserted
+            self.assertEqual(int(rows[0][0]), 20)
+
+            # check partition that was chosen for insert
+            self.assertEqual(str(rows[0][1]), 'ins_test_1')
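+            # (merge_range_partitions() is expected to keep the first
+            # partition's name, so the merged partition 'ins_test_1' now
+            # covers [1, 21) and receives the concurrently inserted 20)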
+
+        # Stop instance and finish work
+        node.stop()
+        node.cleanup()
+
+    def test_pg_dump(self):
+        """
+        Test dump and restore of partitioned tables using the pg_dump and pg_restore tools.
+
+        Test strategy:
+        - test range and hash partitioned tables;
+        - for each partitioned table, check the following on the restored side:
+            * constraints related to partitioning;
+            * init callback function and enable_parent flag;
+            * number of rows in parent and child tables;
+            * validity of the plan of a simple SELECT query over the partitioned table;
+        - check dumping with the following pg_dump parameters:
+            * format = plain | custom;
+            * use of inserts and copy.
+        - all test cases are carried out on tables with half of the data located
+            in the parent table and the rest in child tables.
+        """
+
+        import subprocess
+
+        # Init and start postgres instance with preload pg_pathman module
+        node = get_new_node('test')
+        node.init()
+        node.append_conf('postgresql.conf', """
+            shared_preload_libraries=\'pg_pathman\'
+            pg_pathman.override_copy=false
+            """)
+        node.start()
+
+        # Init two databases: initial and copy
+        node.psql('postgres', 'create database initial')
+        node.psql('postgres', 'create database copy')
+        node.psql('initial', 'create extension pg_pathman')
+
+        # Create and fill partitioned tables in the initial database
+        with node.connect('initial') as con:
+
+            # create tables and load the initial data
+            con.execute('create table range_partitioned (i integer not null)')
+            con.execute(
+                'insert into range_partitioned select i from generate_series(1, 500) i'
+            )
+            con.execute('create table hash_partitioned (i integer not null)')
+            con.execute(
+                'insert into hash_partitioned select i from generate_series(1, 500) i'
+            )
+
+            # partition table keeping data in base table
+            # enable_parent parameter automatically becomes true
+            con.execute(
+                'select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)'
+            )
+            con.execute(
+                'select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)'
+            )
+
+            # fill child tables with the remaining data
+            con.execute(
+                'insert into range_partitioned select i from generate_series(501, 1000) i'
+            )
+            con.execute(
+                'insert into hash_partitioned select i from generate_series(501, 1000) i'
+            )
+
+            # set init callback
+            con.execute("""
+                create or replace function init_partition_stub_callback(args jsonb)
+                returns void as $$
+                begin
+                end
+                $$ language plpgsql;
+                """)
+            con.execute(
+                'select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')'
+            )
+            con.execute(
+                'select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')'
+            )
+
+            # turn off enable_parent option
+            con.execute(
+                'select set_enable_parent(\'range_partitioned\', false)')
+            con.execute('select set_enable_parent(\'hash_partitioned\', false)')
+
+            con.commit()
+
+        # compare strategies
+        CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3)
+
+        def cmp_full(con1, con2):
+            """
+            Compare partition selection in plans
+            and the contents of partitioned tables
+            """
+
+            plan_query = 'explain (costs off, format json) select * from %s'
+            content_query = 'select * from %s order by i'
+            table_refs = [
+                'range_partitioned', 'only range_partitioned',
+                'hash_partitioned', 'only hash_partitioned'
+            ]
+            for table_ref in table_refs:
+                plan_initial = con1.execute(
+                    plan_query % table_ref)[0][0][0]['Plan']
+                plan_copy = con2.execute(
+                    plan_query % table_ref)[0][0][0]['Plan']
+                if ordered(plan_initial) != ordered(plan_copy):
+                    return PLANS_MISMATCH
+
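+                # plans are compared via ordered(), the same normalization
+                # helper used for the plan checks in test_parallel_nodes,
+                # so the comparison should not depend on element ordering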
+                content_initial = [
+                    x[0] for x in con1.execute(content_query % table_ref)
+                ]
+                content_copy = [
+                    x[0] for x in con2.execute(content_query % table_ref)
+                ]
+                if content_initial != content_copy:
+                    return CONTENTS_MISMATCH
+
+            return CMP_OK
+
+        def turnoff_pathman(node):
+            node.psql('initial', 'alter system set pg_pathman.enable to off')
+            node.reload()
+
+        def turnon_pathman(node):
+            node.psql('initial', 'alter system set pg_pathman.enable to on')
+            node.psql('copy', 'alter system set pg_pathman.enable to on')
+            node.psql('initial',
+                      'alter system set pg_pathman.override_copy to off')
+            node.psql('copy',
+                      'alter system set pg_pathman.override_copy to off')
+            node.reload()
+
+        # Test dump/restore functionality from the 'initial' database to 'copy'
+        test_params = [
+            (None, None, [
+                node.get_bin_path("pg_dump"), "-p {}".format(node.port),
+                "initial"
+            ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"],
+             cmp_full),  # dump as plain text and restore via COPY
+            (turnoff_pathman, turnon_pathman, [
+                node.get_bin_path("pg_dump"), "-p {}".format(node.port),
+                "--inserts", "initial"
+            ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"],
+             cmp_full),  # dump as plain text and restore via INSERTs
+            (None, None, [
+                node.get_bin_path("pg_dump"), "-p {}".format(node.port),
+                "--format=custom", "initial"
+            ], [
+                node.get_bin_path("pg_restore"), "-p {}".format(node.port),
+                "--dbname=copy"
+            ], cmp_full),  # dump in archive format
+        ]
+
+        try:
+            FNULL = open(os.devnull, 'w')
+
+            for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params:
+
+                dump_restore_cmd = " | ".join((' '.join(pg_dump_params),
+                                               ' '.join(pg_restore_params)))
+
+                if preproc is not None:
+                    preproc(node)
+
+                # transfer and restore data
+                p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE)
+                stdoutdata, _ = p1.communicate()
+                p2 = subprocess.Popen(
+                    pg_restore_params,
+                    stdin=subprocess.PIPE,
+                    stdout=FNULL,
+                    stderr=FNULL)
+                p2.communicate(input=stdoutdata)
+
+                if postproc is not None:
+                    postproc(node)
+
+                # validate data
+                with node.connect('initial') as con1, \
+                     node.connect('copy') as con2:
+
+                    # compare plans and contents of initial and copy
+                    cmp_result = cmp_dbs(con1, con2)
+                    self.assertNotEqual(
+                        cmp_result, PLANS_MISMATCH,
+                        "mismatch in plans of select query on partitioned tables under the command: %s"
+                        % dump_restore_cmd)
+                    self.assertNotEqual(
+                        cmp_result, CONTENTS_MISMATCH,
+                        "mismatch in contents of partitioned tables under the command: %s"
+                        % dump_restore_cmd)
+
+                    # compare enable_parent flag and callback function
+                    config_params_query = """
+                        select partrel, enable_parent, init_callback from pathman_config_params
+                        """
+                    config_params_initial, config_params_copy = {}, {}
+                    for row in con1.execute(config_params_query):
+                        config_params_initial[row[0]] = row[1:]
+                    for row in con2.execute(config_params_query):
+                        config_params_copy[row[0]] = row[1:]
+                    self.assertEqual(config_params_initial, config_params_copy, \
+                                     "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd)
+
+                    # compare constraints on each partition
+                    constraints_query = """
+                        select r.relname, c.conname, c.consrc from
+                        pg_constraint c join pg_class r on c.conrelid=r.oid
+                        where relname similar to '(range|hash)_partitioned_\d+'
+                        """
+                    constraints_initial, constraints_copy = {}, {}
+                    for row in con1.execute(constraints_query):
+                        constraints_initial[row[0]] = row[1:]
+                    for row in con2.execute(constraints_query):
+                        constraints_copy[row[0]] = row[1:]
+                    self.assertEqual(constraints_initial, constraints_copy, \
"mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + + # clear copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') + + except: + raise + finally: + FNULL.close() + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_concurrent_detach(self): + """ + Test concurrent detach partition with contiguous + tuple inserting and spawning new partitions + """ + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between successive inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue( + os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + self.assertTrue( + os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + FNULL = open(os.devnull, 'w') + + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=FNULL, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + "%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=FNULL, + stderr=FNULL, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) + + # Stop instance and finish work + node.stop() + node.cleanup() + FNULL.close() -if __name__ == "__main__": - unittest.main() +if __name__ == "__main__": + unittest.main() From c2ed28a6bdebdf743774ca3d1c1763c99b652f12 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 24 Jul 2017 15:36:49 +0300 Subject: [PATCH 089/528] new test test_conc_part_drop_runtime_append() --- tests/python/partitioning_test.py | 100 ++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 52b96d87..207fd664 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -629,6 
+629,106 @@ def test_parallel_nodes(self): node.stop() node.cleanup() + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + """) # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # Stop instance and finish work + node.stop() + node.cleanup() + def test_conc_part_creation_insert(self): """ Test concurrent partition creation on INSERT """ From 61c01f603ce6938ea89e5ae5aa944fb4f6f2cdf1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 24 Jul 2017 16:22:58 +0300 Subject: [PATCH 090/528] improve test_conc_part_drop_runtime_append() --- tests/python/partitioning_test.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 207fd664..32c30492 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -647,6 +647,14 @@ def test_conc_part_drop_runtime_append(self): # Create two separate connections for this test with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + # 
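The Queue introduced by this hunk is not incidental plumbing: an AssertionError raised inside a worker thread is swallowed by threading and never reaches the unittest runner, so asserting inside con2_thread could let a broken plan pass silently. The patch therefore ships the three flags back to the main thread and asserts there. A minimal self-contained sketch of that pattern (names are illustrative, not from the patch):

    import threading
    import unittest

    try:
        from queue import Queue     # Python 3
    except ImportError:
        from Queue import Queue     # Python 2

    class WorkerResultDemo(unittest.TestCase):
        def test_assert_in_main_thread(self):
            results = Queue()

            def worker():
                # Compute the checks here, but do NOT assert: a failure
                # raised in this thread would terminate only the thread,
                # not the test run.
                results.put(2 + 2 == 4)

            t = threading.Thread(target=worker)
            t.start()
            t.join()

            # Assertions belong in the main thread, where unittest sees them
            self.assertTrue(results.get())

    if __name__ == "__main__":
        unittest.main()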
Thread for connection #2 (it has to wait) def con2_thread(): con1.begin() @@ -678,9 +686,9 @@ def con2_thread(): has_drop_test_4 = True continue - self.assertTrue(has_runtime_append) - self.assertFalse(has_drop_test_1) - self.assertTrue(has_drop_test_4) + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + # Step 1: cache partitioned table in con1 con1.begin() @@ -725,6 +733,12 @@ def con2_thread(): # check number of partitions self.assertEqual(len(rows), 99) + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + # Stop instance and finish work node.stop() node.cleanup() From 90b4f5770a1bc7d587ee72619482306a3ad9fd1d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Jul 2017 19:36:30 +0300 Subject: [PATCH 091/528] Make special container for postgres --with-cassert --- Dockerfile.tmpl | 2 +- make_images.py | 110 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 83 insertions(+), 29 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index e5663156..0504dd5a 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,4 +1,4 @@ -FROM postgres:${PG_VERSION}-alpine +FROM ${PG_IMAGE} ENV LANG=C.UTF-8 PGDATA=/pg/data diff --git a/make_images.py b/make_images.py index b63b3fbe..a8ba8112 100755 --- a/make_images.py +++ b/make_images.py @@ -1,10 +1,64 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 +import os import subprocess import getpass +import requests +import tempfile + +from urllib.parse import urljoin +from urllib.request import urlopen DOCKER_ID = 'pathman' -pg_versions = ['9.5','9.6','10'] +ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/9.6/alpine/' +ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' +ALPINE_PATCH = b''' +diff --git a/Dockerfile b/Dockerfile +index 9878023..ba215bc 100644 +--- a/Dockerfile ++++ b/Dockerfile +@@ -80,6 +80,7 @@ RUN set -ex \\ + # configure options taken from: + # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 + && ./configure \\ ++ --enable-cassert \\ + --build="$gnuArch" \\ + # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" + # --enable-nls \\ +''' +CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID + +def make_alpine_image(image_name): + dockerfile = urlopen(urljoin(ALPINE_BASE_URL, 'Dockerfile')).read() + entrypoint_sh = urlopen(urljoin(ALPINE_BASE_URL, ALPINE_ENTRYPOINT)).read() + + with tempfile.TemporaryDirectory() as tmpdir: + print("Creating build in %s" % tmpdir) + with open(os.path.join(tmpdir, 'Dockerfile'), 'w') as f: + f.write(dockerfile.decode()) + + with open(os.path.join(tmpdir, ALPINE_ENTRYPOINT), 'w') as f: + f.write(entrypoint_sh.decode()) + + with open(os.path.join(tmpdir, 'cassert.patch'), 'w') as f: + f.write(ALPINE_PATCH.decode()) + + subprocess.check_output(["git", "apply", "cassert.patch"], cwd=tmpdir) + print("patch applied") + subprocess.check_output(["docker", "build", ".", '-t', image_name], cwd=tmpdir) + print("build ok: ", image_name) + subprocess.check_output(['docker', 'push', image_name], + stderr=subprocess.STDOUT) + print("upload ok:", image_name) + +make_alpine_image(CUSTOM_IMAGE_NAME) + +pg_containers = [ + ('pg95', 'postgres:9.5-alpine'), + ('pg96', 'postgres:9.6-alpine'), + ('pg10', 'postgres:10-alpine'), + 
('pg96_ca', CUSTOM_IMAGE_NAME), +] image_types = { 'clang_check_code': { @@ -30,32 +84,32 @@ travis_conf = [] print("") -for pg_version in pg_versions: - pgname = 'pg%s' % pg_version.replace('.', '') - for key, variables in image_types.items(): - image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) - with open('Dockerfile', 'w') as out: - with open('Dockerfile.tmpl', 'r') as f: - for line in f: - line = line.replace('${PG_VERSION}', pg_version) - for key, value in variables.items(): - varname = '${%s}' % key - line = line.replace(varname, value) - - out.write(line) - - args = [ - 'docker', - 'build', - '-t', image_name, - '.' - ] - subprocess.check_output(args, stderr=subprocess.STDOUT) - print("build ok:", image_name) - subprocess.check_output(['docker', 'push', image_name], - stderr=subprocess.STDOUT) - print("upload ok:", image_name) - travis_conf.append(travis_conf_line % image_name) +if __name__ == '__main__': + for pgname, container in pg_containers: + for key, variables in image_types.items(): + image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) + with open('Dockerfile', 'w') as out: + with open('Dockerfile.tmpl', 'r') as f: + for line in f: + line = line.replace('${PG_IMAGE}', container) + for key, value in variables.items(): + varname = '${%s}' % key + line = line.replace(varname, value) + + out.write(line) + + args = [ + 'docker', + 'build', + '-t', image_name, + '.' + ] + subprocess.check_output(args, stderr=subprocess.STDOUT) + print("build ok:", image_name) + subprocess.check_output(['docker', 'push', image_name], + stderr=subprocess.STDOUT) + print("upload ok:", image_name) + travis_conf.append(travis_conf_line % image_name) print("\ntravis configuration") print('\n'.join(travis_conf)) From 0967d410c0ef4a9c7c95bcd6743a4232fa6023fc Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 25 Jul 2017 10:43:17 +0300 Subject: [PATCH 092/528] Add cassert containers to travis --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index cd41b73e..29f4dff5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,3 +26,6 @@ env: - DOCKER_IMAGE=pathman/pg10_clang_check_code - DOCKER_IMAGE=pathman/pg10_cppcheck - DOCKER_IMAGE=pathman/pg10_pathman_tests + - DOCKER_IMAGE=pathman/pg96_ca_clang_check_code + - DOCKER_IMAGE=pathman/pg96_ca_cppcheck + - DOCKER_IMAGE=pathman/pg96_ca_pathman_tests From b0d084f0cce2c2ce8c2970ff345244c208085fdd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 25 Jul 2017 14:03:01 +0300 Subject: [PATCH 093/528] Use pg10 for custom container --- .travis.yml | 6 +++--- make_images.py | 33 ++++++++++++++++++--------------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/.travis.yml b/.travis.yml index 29f4dff5..3ca602c2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,6 +26,6 @@ env: - DOCKER_IMAGE=pathman/pg10_clang_check_code - DOCKER_IMAGE=pathman/pg10_cppcheck - DOCKER_IMAGE=pathman/pg10_pathman_tests - - DOCKER_IMAGE=pathman/pg96_ca_clang_check_code - - DOCKER_IMAGE=pathman/pg96_ca_cppcheck - - DOCKER_IMAGE=pathman/pg96_ca_pathman_tests + - DOCKER_IMAGE=pathman/pg10_ca_clang_check_code + - DOCKER_IMAGE=pathman/pg10_ca_cppcheck + - DOCKER_IMAGE=pathman/pg10_ca_pathman_tests diff --git a/make_images.py b/make_images.py index a8ba8112..dc01407e 100755 --- a/make_images.py +++ b/make_images.py @@ -10,21 +10,20 @@ from urllib.request import urlopen DOCKER_ID = 'pathman' -ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/9.6/alpine/' +ALPINE_BASE_URL = 
'https://raw.githubusercontent.com/docker-library/postgres/master/10/alpine/' ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' ALPINE_PATCH = b''' -diff --git a/Dockerfile b/Dockerfile -index 9878023..ba215bc 100644 ---- a/Dockerfile -+++ b/Dockerfile -@@ -80,6 +80,7 @@ RUN set -ex \\ - # configure options taken from: - # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 - && ./configure \\ +--- Dockerfile 2017-07-25 12:43:20.424984422 +0300 ++++ Dockerfile 2017-07-25 12:46:10.279267520 +0300 +@@ -86,6 +86,7 @@ + --enable-integer-datetimes \\ + --enable-thread-safety \\ + --enable-tap-tests \\ + --enable-cassert \\ - --build="$gnuArch" \\ - # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" - # --enable-nls \\ + # skip debugging info -- we want tiny size instead + # --enable-debug \\ + --disable-rpath \\ + ''' CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID @@ -34,16 +33,20 @@ def make_alpine_image(image_name): with tempfile.TemporaryDirectory() as tmpdir: print("Creating build in %s" % tmpdir) + patch_name = os.path.join(tmpdir, "cassert.patch") + with open(os.path.join(tmpdir, 'Dockerfile'), 'w') as f: f.write(dockerfile.decode()) with open(os.path.join(tmpdir, ALPINE_ENTRYPOINT), 'w') as f: f.write(entrypoint_sh.decode()) - with open(os.path.join(tmpdir, 'cassert.patch'), 'w') as f: + with open(patch_name, 'w') as f: f.write(ALPINE_PATCH.decode()) - subprocess.check_output(["git", "apply", "cassert.patch"], cwd=tmpdir) + with open(patch_name, 'r') as f: + p = subprocess.Popen(["patch", "-p0"], cwd=tmpdir, stdin=subprocess.PIPE) + p.communicate(str.encode(f.read())) print("patch applied") subprocess.check_output(["docker", "build", ".", '-t', image_name], cwd=tmpdir) print("build ok: ", image_name) @@ -57,7 +60,7 @@ def make_alpine_image(image_name): ('pg95', 'postgres:9.5-alpine'), ('pg96', 'postgres:9.6-alpine'), ('pg10', 'postgres:10-alpine'), - ('pg96_ca', CUSTOM_IMAGE_NAME), + ('pg10_ca', CUSTOM_IMAGE_NAME), ] image_types = { From f01f9883ca71ae0702392b9d96be49308f56d067 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 25 Jul 2017 15:52:02 +0300 Subject: [PATCH 094/528] fix compatibility issues for postgres 10 --- src/partition_creation.c | 14 ++++++++++++++ src/partition_filter.c | 11 ++++++----- src/pl_funcs.c | 6 ++++-- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 6cfadc0e..73b48dfc 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -47,6 +47,9 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#endif static Oid spawn_partitions_val(Oid parent_relid, const Bound *range_bound_min, @@ -1955,18 +1958,29 @@ drop_single_update_trigger_internal(Oid relid, * To avoid warning message about missing trigger we check it beforehand. 
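/*
 * For readers skimming the hunk below: PostgreSQL 10 collapsed the
 * (List *objname, List *objargs) pair of get_object_address() into a
 * single Node *, which is why the call is duplicated under a
 * PG_VERSION_NUM guard. Factored into a helper, the same gate would
 * look like this sketch (the wrapper name is hypothetical; the two
 * signatures are exactly the ones used in the hunk):
 */
#include "postgres.h"
#include "catalog/objectaddress.h"
#include "nodes/pg_list.h"
#include "storage/lockdefs.h"
#include "utils/rel.h"

static ObjectAddress
lookup_trigger_address(List *namelist, Relation *relation)
{
#if PG_VERSION_NUM >= 100000
	/* PG10+: the object name travels as a single Node pointer */
	return get_object_address(OBJECT_TRIGGER, (Node *) namelist,
							  relation, AccessExclusiveLock, true);
#else
	/* Pre-10: separate name and argument lists */
	return get_object_address(OBJECT_TRIGGER, namelist, NIL,
							  relation, AccessExclusiveLock, true);
#endif
}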
* and quit if it doesn't */ +#if PG_VERSION_NUM >= 100000 + address = get_object_address(OBJECT_TRIGGER, + (Node *) namelist, + &relation, + AccessExclusiveLock, + true); +#else address = get_object_address(OBJECT_TRIGGER, namelist, NIL, &relation, AccessExclusiveLock, true); +#endif + if (!OidIsValid(address.objectId)) return; /* Actually remove trigger */ n->removeType = OBJECT_TRIGGER; n->objects = list_make1(namelist); +#if PG_VERSION_NUM < 100000 n->arguments = NIL; +#endif n->behavior = DROP_RESTRICT; /* default behavior */ n->missing_ok = true; n->concurrent = false; diff --git a/src/partition_filter.c b/src/partition_filter.c index f6a19f74..668ca32e 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -436,6 +436,7 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, int nparts; bool isnull; Datum value; + Oid parent = PrelParentRelid(prel); /* Execute expression */ value = ExecEvalExprCompat(expr_state, econtext, &isnull, @@ -453,11 +454,11 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - selected_partid = create_partitions_for_value(PrelParentRelid(prel), + selected_partid = create_partitions_for_value(parent, value, prel->ev_type); /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + invalidate_pathman_relation_info(parent, NULL); } else selected_partid = parts[0]; @@ -469,15 +470,15 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, if (rri_holder == NULL) { /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + invalidate_pathman_relation_info(parent, NULL); /* Get a fresh PartRelationInfo */ - prel = get_pathman_relation_info(PrelParentRelid(prel)); + prel = get_pathman_relation_info(parent); /* Paranoid check (all partitions have vanished) */ if (!prel) elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(PrelParentRelid(prel))); + get_rel_name_or_relid(parent)); } /* If partition has subpartitions */ else if (rri_holder->has_subpartitions) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index fa0a982c..ba286020 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -671,8 +671,10 @@ is_tuple_convertible(PG_FUNCTION_ARGS) map = convert_tuples_by_name(RelationGetDescr(rel1), RelationGetDescr(rel2), ERR_PART_DESC_CONVERT); - /* Now free map */ - pfree(map); + + /* Now free map. 
Note that map can be NULL if conversion isn't needed */ + if (map) + pfree(map); } PG_CATCH(); { From 618ab2ffef561fd8e4db9e50b5e159cef6602ff4 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 25 Jul 2017 15:56:55 +0300 Subject: [PATCH 095/528] bring partitioning_test.py from rel_future_beta branch --- tests/python/partitioning_test.py | 2225 +++++++++++++++-------------- 1 file changed, 1184 insertions(+), 1041 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 6d79dd96..32c30492 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 # coding: utf-8 - """ - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries +partitioning_test.py + Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2017, Postgres Professional """ import unittest @@ -20,1051 +19,1195 @@ version = get_config().get("VERSION_NUM") + # Helper function for json equality def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper + """ To run tests with FDW support, set environment variable TEST_FDW=1 """ + def wrapper(*args, **kwargs): + if os.environ.get('FDW_DISABLED') != '1': + func(*args, **kwargs) + else: + print('Warning: FDW features tests are disabled, skipping...') -class PartitioningTests(unittest.TestCase): + return wrapper - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) - - def test_concurrent(self): - """Tests concurrent partitioning""" - - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some 
rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they are doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done its work - flags = [Flag(False) for i in range(3)] - - # All threads synchronize though this lock - lock = threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select 
create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - master.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 
'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - b'25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return - - # Prepare test database - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = 
json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select 
generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_merge_insert(self): - """Test concurrent merge_range_partitions() + INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations - - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") - - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 6: finish merge in con1 (success, unlock) - con1.commit() - - # Step 7: wait for con2 - t.join() - - rows = con1.execute("select *, tableoid::regclass::text from ins_test") - - # check number of rows in table - self.assertEqual(len(rows), 1) - - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) - - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') - - # Stop 
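Several of the concurrency tests in this file open-code the same busy-wait on pg_locks to detect that the background thread has blocked. If the suite keeps growing, the loop is worth factoring out; here is a sketch under the assumptions that the testgres-style node.connect()/execute() API used above is available and that any ungranted lock means the worker is blocked (the helper name, and the timeout it adds on top of the open-coded loops, are editorial):

    import time

    def wait_until_blocked(node, timeout=30.0, poll_interval=0.1):
        """ Busy-wait until some backend is waiting on a lock.

            Hypothetical helper. Any row in pg_locks with granted = false
            is taken to mean the worker thread is blocked; the deadline
            turns a scheduling hiccup into a fast failure instead of a
            hung test suite.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            with node.connect() as con:
                locks = con.execute(
                    "select count(*) from pg_locks where granted = 'f'")
                if int(locks[0][0]) > 0:
                    return
            time.sleep(poll_interval)
        raise AssertionError("no backend became blocked within %.1fs" % timeout)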
instance and finish work - node.stop() - node.cleanup() - - def test_pg_dump(self): - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. - """ - - import subprocess - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute('insert into range_partitioned select i from generate_series(1, 500) i') - con.execute('create table hash_partitioned (i integer not null)') - con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') - con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') - - # fillin child tables with remain data - con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') - con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') - - # turn off enable_parent option - con.execute('select set_enable_parent(\'range_partitioned\', false)') - con.execute('select set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - def cmp_full(con1, con2): - """Compare selection partitions in plan and contents in partitioned tables""" - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', - 'only range_partitioned', - 'hash_partitioned', - 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [x[0] for x in con1.execute(content_query % 
table_ref)] - content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', 'alter system set pg_pathman.override_copy to off') - node.psql('copy', 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, - turnon_pathman, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--inserts", - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--format=custom", - "initial"], - [node.get_bin_path("pg_restore"), - "-p {}".format(node.port), - "--dbname=copy"], - cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, - stdout=FNULL, stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the 
command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_concurrent_detach(self): - """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" - - # Init parameters - num_insert_workers = 8 - detach_timeout = 0.1 # time in sec between successive inserts and detachs - num_detachs = 100 # estimated number of detachs - inserts_advance = 1 # abvance in sec of inserts process under detachs - test_interval = int(math.ceil(detach_timeout * num_detachs)) - - insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" - detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" - - # Check pgbench scripts on existance - self.assertTrue(os.path.isfile(insert_pgbench_script), - msg="pgbench script with insert timestamp doesn't exist") - self.assertTrue(os.path.isfile(detach_pgbench_script), - msg="pgbench script with detach letfmost partition doesn't exist") - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute('create table ts_range_partitioned(ts timestamp not null)') - con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ - "-j", "%i" % num_insert_workers, - "-c", "%i" % num_insert_workers, - "-f", insert_pgbench_script, - "-T", "%i" % (test_interval+inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ - "-D", "timeout=%f" % detach_timeout, - "-f", detach_pgbench_script, - "-T", "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg="Race condition between detach and concurrent inserts with append partition is expired") - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() +class PartitioningTests(unittest.TestCase): + def setUp(self): + self.setup_cmd = [ + "create table abc(id serial, t text)", + "insert into abc select generate_series(1, 300000)", + "select create_hash_partitions('abc', 'id', 3, partition_data := false)", + ] + + def tearDown(self): + stop_all() + + def start_new_pathman_cluster(self, name='test', allows_streaming=False): + node = get_new_node(name) + node.init(allows_streaming=allows_streaming) + node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") + node.start() + node.psql('postgres', 'create extension pg_pathman') + return node + + def init_test_data(self, node): + """ Initialize pg_pathman extension and test data """ + for cmd in self.setup_cmd: + 
node.safe_psql('postgres', cmd) + + def catchup_replica(self, master, replica): + """ Wait until replica synchronizes with master """ + if version >= 100000: + wait_lsn_query = \ + 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + else: + wait_lsn_query = \ + 'SELECT pg_current_xlog_location() <= replay_location ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + master.poll_query_until('postgres', wait_lsn_query) + + def test_concurrent(self): + """ Test concurrent partitioning """ + + node = self.start_new_pathman_cluster() + self.init_test_data(node) + + node.psql('postgres', "select partition_table_concurrently('abc')") + + while True: + # update some rows to check for deadlocks + node.safe_psql('postgres', """ + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute('postgres', """ + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + + node.stop() + + def test_replication(self): + """ Test how pg_pathman works with replication """ + + node = get_new_node('master') + replica = get_new_node('repl') + + # initialize master server + node = self.start_new_pathman_cluster(allows_streaming=True) + node.backup('my_backup') + + # initialize replica from backup + replica.init_from_backup(node, 'my_backup', has_streaming=True) + replica.start() + + # initialize pg_pathman extension and some test data + self.init_test_data(node) + + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('postgres', 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) + + def test_locks(self): + """ + Test that a session trying to create new partitions + waits for other sessions if they are doing the same + """ + + import threading + import time + + class Flag: + def __init__(self, value): + self.flag = value + + def set(self, value): + self.flag = value + + def get(self): + return self.flag + + # There is one flag for each thread which 
shows if thread have done its work + flags = [Flag(False) for i in range(3)] + + # All threads synchronize though this lock + lock = threading.Lock() + + # Define thread function + def add_partition(node, flag, query): + """ + We expect that this query will wait until + another session commits or rolls back + """ + node.safe_psql('postgres', query) + with lock: + flag.set(True) + + # Initialize master server + node = get_new_node('master') + + node.init() + node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql( + 'postgres', + 'create extension pg_pathman; ' + 'create table abc(id serial, t text); ' + + 'insert into abc select generate_series(1, 100000); ' + + 'select create_range_partitions(\'abc\', \'id\', 1, 50000);') + + # Start transaction that will create partition + con = node.connect() + con.begin() + con.execute('select append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions and wait some + # time + query = [ + 'select prepend_range_partition(\'abc\')', + 'select append_range_partition(\'abc\')', + 'select add_range_partition(\'abc\', 500000, 550000)', + ] + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass'), + b'6\n') + + def test_tablespace(self): + """ Check tablespace support """ + + def check_tablespace(node, tablename, tablespace): + res = node.execute('postgres', + 'select get_tablespace(\'{}\')'.format(tablename)) + if len(res) == 0: + return False + + return res[0][0] == tablespace + + node = get_new_node('master') + node.init() + node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.psql('postgres', 'create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) + + # create table in this tablespace + node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended\')') + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql('postgres', + 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + 'postgres', + 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' + ) + + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + + @if_fdw_enabled + def test_foreign_table(self): + """ Test foreign tables """ + + # Start master server + master = get_new_node('test') + master.init() + master.append_conf('postgresql.conf', """ + shared_preload_libraries='pg_pathman, postgres_fdw'\n + """) + master.start() + master.psql('postgres', 'create extension pg_pathman') + master.psql('postgres', 'create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + master.psql('postgres', """ + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('postgres', 'select current_user')[0][0] + + # Start foreign server + fserv = get_new_node('fserv') + fserv.init().start() + fserv.safe_psql('postgres', "create table ftable(id serial, name text)") + fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql('postgres', """ + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql('postgres', """ + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql('postgres', """ + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + 
'postgres', + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql('postgres', """ + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('postgres', + 'create table f_hash_test(id serial, name text)') + + master.safe_psql('postgres', """ + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql('postgres', """ + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('postgres', + 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql('postgres', "select drop_partitions('hash_test')") + + @if_fdw_enabled + def test_parallel_nodes(self): + """ Test parallel queries under partitions """ + + import json + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < 90600: + return + + # Prepare test database + node.psql('postgres', 'create extension pg_pathman') + + node.psql('postgres', """ + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('postgres', 'vacuum analyze') + + node.psql('postgres', """ + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= 100000: + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": 
"Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select 
create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + """) # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load 
pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_merge_insert(self): + """ Test concurrent merge_range_partitions() + INSERT """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + + # Step 2: initilize con2 + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations + + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") + + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: finish merge in con1 (success, unlock) + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute("select *, tableoid::regclass::text from ins_test") + + # check number of rows in table + self.assertEqual(len(rows), 1) + + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) + + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_pg_dump(self): + """ + Test using dump and restore of partitioned table through pg_dump and pg_restore tools. + + Test strategy: + - test range and hash partitioned tables; + - for each partitioned table check on restorable side the following quantities: + * constraints related to partitioning; + * init callback function and enable parent flag; + * number of rows in parent and child tables; + * plan validity of simple SELECT query under partitioned table; + - check dumping using the following parameters of pg_dump: + * format = plain | custom; + * using of inserts and copy. 
+ - all test cases are carried out on tables half-full with data located in parent part, + the rest of data - in child tables. + """ + + import subprocess + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + """) + node.start() + + # Init two databases: initial and copy + node.psql('postgres', 'create database initial') + node.psql('postgres', 'create database copy') + node.psql('initial', 'create extension pg_pathman') + + # Create and fillin partitioned table in initial database + with node.connect('initial') as con: + + # create and initailly fillin tables + con.execute('create table range_partitioned (i integer not null)') + con.execute( + 'insert into range_partitioned select i from generate_series(1, 500) i' + ) + con.execute('create table hash_partitioned (i integer not null)') + con.execute( + 'insert into hash_partitioned select i from generate_series(1, 500) i' + ) + + # partition table keeping data in base table + # enable_parent parameter automatically becames true + con.execute( + 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)' + ) + con.execute( + 'select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)' + ) + + # fillin child tables with remain data + con.execute( + 'insert into range_partitioned select i from generate_series(501, 1000) i' + ) + con.execute( + 'insert into hash_partitioned select i from generate_series(501, 1000) i' + ) + + # set init callback + con.execute(""" + create or replace function init_partition_stub_callback(args jsonb) + returns void as $$ + begin + end + $$ language plpgsql; + """) + con.execute( + 'select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')' + ) + con.execute( + 'select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')' + ) + + # turn off enable_parent option + con.execute( + 'select set_enable_parent(\'range_partitioned\', false)') + con.execute('select set_enable_parent(\'hash_partitioned\', false)') + + con.commit() + + # compare strategies + CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) + + def cmp_full(con1, con2): + """ + Compare selection partitions in plan + and contents in partitioned tables + """ + + plan_query = 'explain (costs off, format json) select * from %s' + content_query = 'select * from %s order by i' + table_refs = [ + 'range_partitioned', 'only range_partitioned', + 'hash_partitioned', 'only hash_partitioned' + ] + for table_ref in table_refs: + plan_initial = con1.execute( + plan_query % table_ref)[0][0][0]['Plan'] + plan_copy = con2.execute( + plan_query % table_ref)[0][0][0]['Plan'] + if ordered(plan_initial) != ordered(plan_copy): + return PLANS_MISMATCH + + content_initial = [ + x[0] for x in con1.execute(content_query % table_ref) + ] + content_copy = [ + x[0] for x in con2.execute(content_query % table_ref) + ] + if content_initial != content_copy: + return CONTENTS_MISMATCH + + return CMP_OK + + def turnoff_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to off') + node.reload() + + def turnon_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to on') + node.psql('copy', 'alter system set pg_pathman.enable to on') + node.psql('initial', + 'alter system set pg_pathman.override_copy to off') + node.psql('copy', + 'alter system set 
pg_pathman.override_copy to off') + node.reload() + + # Test dump/restore from init database to copy functionality + test_params = [ + (None, None, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "initial" + ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via COPY + (turnoff_pathman, turnon_pathman, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "--inserts", "initial" + ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via INSERTs + (None, None, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "--format=custom", "initial" + ], [ + node.get_bin_path("pg_restore"), "-p {}".format(node.port), + "--dbname=copy" + ], cmp_full), # dump in archive format + ] + + try: + FNULL = open(os.devnull, 'w') + + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + + dump_restore_cmd = " | ".join((' '.join(pg_dump_params), + ' '.join(pg_restore_params))) + + if (preproc != None): + preproc(node) + + # transfer and restore data + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) + stdoutdata, _ = p1.communicate() + p2 = subprocess.Popen( + pg_restore_params, + stdin=subprocess.PIPE, + stdout=FNULL, + stderr=FNULL) + p2.communicate(input=stdoutdata) + + if (postproc != None): + postproc(node) + + # validate data + with node.connect('initial') as con1, \ + node.connect('copy') as con2: + + # compare plans and contents of initial and copy + cmp_result = cmp_dbs(con1, con2) + self.assertNotEqual( + cmp_result, PLANS_MISMATCH, + "mismatch in plans of select query on partitioned tables under the command: %s" + % dump_restore_cmd) + self.assertNotEqual( + cmp_result, CONTENTS_MISMATCH, + "mismatch in contents of partitioned tables under the command: %s" + % dump_restore_cmd) + + # compare enable_parent flag and callback function + config_params_query = """ + select partrel, enable_parent, init_callback from pathman_config_params + """ + config_params_initial, config_params_copy = {}, {} + for row in con1.execute(config_params_query): + config_params_initial[row[0]] = row[1:] + for row in con2.execute(config_params_query): + config_params_copy[row[0]] = row[1:] + self.assertEqual(config_params_initial, config_params_copy, \ + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + + # compare constraints on each partition + constraints_query = """ + select r.relname, c.conname, c.consrc from + pg_constraint c join pg_class r on c.conrelid=r.oid + where relname similar to '(range|hash)_partitioned_\d+' + """ + constraints_initial, constraints_copy = {}, {} + for row in con1.execute(constraints_query): + constraints_initial[row[0]] = row[1:] + for row in con2.execute(constraints_query): + constraints_copy[row[0]] = row[1:] + self.assertEqual(constraints_initial, constraints_copy, \ + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + + # clear copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') + + except: + raise + finally: + FNULL.close() + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_concurrent_detach(self): + """ + Test concurrent detach partition with contiguous + tuple inserting and spawning new partitions + """ + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between 
successive inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue( + os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + self.assertTrue( + os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + FNULL = open(os.devnull, 'w') + + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=FNULL, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + "%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=FNULL, + stderr=FNULL, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) + + # Stop instance and finish work + node.stop() + node.cleanup() + FNULL.close() -if __name__ == "__main__": - unittest.main() +if __name__ == "__main__": + unittest.main() From ef953a337e3146ba75e644481a52710114286226 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 25 Jul 2017 17:28:02 +0300 Subject: [PATCH 096/528] minor fixes --- src/pl_funcs.c | 11 +++++------ src/planner_tree_modification.c | 1 - 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ba286020..6672f124 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -668,13 +668,12 @@ is_tuple_convertible(PG_FUNCTION_ARGS) void *map; /* we don't actually need it */ /* Try to build a conversion map */ - map = convert_tuples_by_name(RelationGetDescr(rel1), - RelationGetDescr(rel2), - ERR_PART_DESC_CONVERT); + map = convert_tuples_by_name_map(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); - /* Now free map. 
Note that map can be NULL if conversion isn't needed */ - if (map) - pfree(map); + /* Now free map */ + pfree(map); } PG_CATCH(); { diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 2f82f7f6..246436e3 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -367,7 +367,6 @@ find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) return FP_NON_SINGULAR_RESULT; /* Exit if there's no quals (no use) */ - /* TODO: What if there is only one partition? */ if (!quals) return FP_NON_SINGULAR_RESULT; From 063ea8e363550680afa45fffd9255f8c459b5eb0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 26 Jul 2017 17:58:59 +0300 Subject: [PATCH 097/528] reduce diff with rel_future_beta --- src/partition_filter.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 668ca32e..323e6a8f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -431,12 +431,12 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, { MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; - Oid selected_partid = InvalidOid; + Oid parent_relid = PrelParentRelid(prel), + partition_relid = InvalidOid; Oid *parts; int nparts; bool isnull; Datum value; - Oid parent = PrelParentRelid(prel); /* Execute expression */ value = ExecEvalExprCompat(expr_state, econtext, &isnull, @@ -454,31 +454,31 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - selected_partid = create_partitions_for_value(parent, + partition_relid = create_partitions_for_value(parent_relid, value, prel->ev_type); /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent, NULL); + invalidate_pathman_relation_info(parent_relid, NULL); } - else selected_partid = parts[0]; + else partition_relid = parts[0]; old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - rri_holder = scan_result_parts_storage(selected_partid, parts_storage); + rri_holder = scan_result_parts_storage(partition_relid, parts_storage); MemoryContextSwitchTo(old_mcxt); - /* Could not find suitable partition */ + /* This partition has been dropped, repeat with a new 'prel' */ if (rri_holder == NULL) { /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent, NULL); + invalidate_pathman_relation_info(parent_relid, NULL); /* Get a fresh PartRelationInfo */ - prel = get_pathman_relation_info(parent); + prel = get_pathman_relation_info(parent_relid); /* Paranoid check (all partitions have vanished) */ if (!prel) elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(parent)); + get_rel_name_or_relid(parent_relid)); } /* If partition has subpartitions */ else if (rri_holder->has_subpartitions) @@ -486,7 +486,7 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, const PartRelationInfo *subprel; /* Fetch PartRelationInfo for this partitioned relation */ - subprel = get_pathman_relation_info(selected_partid); + subprel = get_pathman_relation_info(partition_relid); Assert(subprel != NULL); /* Build an expression state if not yet */ @@ -685,7 +685,10 @@ partition_filter_exec(CustomScanState *node) tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - /* Search for a matching partition */ + /* + * Search for a matching partition. 
+ * WARNING: 'prel' might change after this call! + */ rri_holder = select_partition_for_insert(econtext, state->expr_state, prel, &state->result_parts, estate); From 2dd61661fc17ae4a05ef397509af021a06149d1d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 27 Jul 2017 16:19:05 +0300 Subject: [PATCH 098/528] Change cassert container --- make_images.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/make_images.py b/make_images.py index dc01407e..4de7d40e 100755 --- a/make_images.py +++ b/make_images.py @@ -12,18 +12,36 @@ DOCKER_ID = 'pathman' ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/10/alpine/' ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' + +''' +How to create this patch: + 1) put `import ipdb; ipdb.set_trace()` in make_alpine_image, after `open(patch_name)..` + 2) run the script + 3) in temporary folder run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1 && diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch` + 4) contents of cassert.patch put to variable below + 5) change Dockerfile.1 to Dockerfile in text, change `\` symbols to `\\` +''' ALPINE_PATCH = b''' ---- Dockerfile 2017-07-25 12:43:20.424984422 +0300 -+++ Dockerfile 2017-07-25 12:46:10.279267520 +0300 -@@ -86,6 +86,7 @@ - --enable-integer-datetimes \\ +--- Dockerfile 2017-07-27 14:54:10.403971867 +0300 ++++ Dockerfile 2017-07-27 14:56:01.132503106 +0300 +@@ -79,7 +79,7 @@ + && wget -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' \\ + # configure options taken from: + # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 +- && ./configure \\ ++ && CFLAGS="-O0" ./configure \\ + --build="$gnuArch" \\ + # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" + # --enable-nls \\ +@@ -87,7 +87,7 @@ --enable-thread-safety \\ --enable-tap-tests \\ -+ --enable-cassert \\ # skip debugging info -- we want tiny size instead - # --enable-debug \\ +-# --enable-debug \\ ++ --enable-debug \\ --disable-rpath \\ - + --with-uuid=e2fs \\ + --with-gnu-ld \\ ''' CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID From 075d00cf9e446605f773fd48ec167069496be733 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 16:20:16 +0300 Subject: [PATCH 099/528] reorder args of select_partition_for_insert(), eliminate tmp_slot --- src/include/partition_filter.h | 7 ++++--- src/partition_filter.c | 25 +++++++++++-------------- src/utility_stmt_hooking.c | 21 ++++++++++++--------- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index e053d2a5..00294050 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -135,10 +135,11 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder *select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, +ResultRelInfoHolder *select_partition_for_insert(ExprState *expr_state, + ExprContext *econtext, + EState *estate, const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate); + ResultPartsStorage *parts_storage); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, diff --git a/src/partition_filter.c b/src/partition_filter.c index 323e6a8f..a046cd2b 100644 --- 
a/src/partition_filter.c +++ b/src/partition_filter.c @@ -424,10 +424,11 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). */ ResultRelInfoHolder * -select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, +select_partition_for_insert(ExprState *expr_state, + ExprContext *econtext, + EState *estate, const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate) + ResultPartsStorage *parts_storage) { MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; @@ -496,11 +497,9 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, Assert(rri_holder->expr_state != NULL); /* Recursively search for subpartitions */ - rri_holder = select_partition_for_insert(econtext, - rri_holder->expr_state, - subprel, - parts_storage, - estate); + rri_holder = select_partition_for_insert(rri_holder->expr_state, + econtext, estate, + subprel, parts_storage); } } /* Loop until we get some result */ @@ -665,7 +664,6 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -682,17 +680,16 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - tmp_slot = econtext->ecxt_scantuple; + /* Store slot for expression evaluation */ econtext->ecxt_scantuple = slot; /* * Search for a matching partition. * WARNING: 'prel' might change after this call! */ - rri_holder = select_partition_for_insert(econtext, state->expr_state, prel, - &state->result_parts, estate); - - econtext->ecxt_scantuple = tmp_slot; + rri_holder = select_partition_for_insert(state->expr_state, + econtext, estate, + prel, &state->result_parts); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 14bfb800..b9ae406e 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -608,8 +608,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, for (;;) { - TupleTableSlot *slot, - *tmp_slot; + TupleTableSlot *slot; bool skip_tuple; Oid tuple_oid = InvalidOid; @@ -637,7 +636,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) break; - /* We can form the input tuple. */ + /* We can form the input tuple */ tuple = heap_form_tuple(tupDesc, values, nulls); if (tuple_oid != InvalidOid) @@ -648,15 +647,19 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ExecSetSlotDescriptor(slot, tupDesc); ExecStoreTuple(tuple, slot, InvalidBuffer, false); - /* Execute expression */ - tmp_slot = econtext->ecxt_scantuple; + /* Store slot for expression evaluation */ econtext->ecxt_scantuple = slot; - /* Search for a matching partition */ - rri_holder = select_partition_for_insert(econtext, expr_state, prel, - &parts_storage, estate); - econtext->ecxt_scantuple = tmp_slot; + /* + * Search for a matching partition. + * WARNING: 'prel' might change after this call! 
+ */ + rri_holder = select_partition_for_insert(expr_state, econtext, estate, + prel, &parts_storage); + child_result_rel = rri_holder->result_rel_info; + + /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = child_result_rel; /* From c2e09f2f09a212428773898ad6fe781d9f08720d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 16:59:30 +0300 Subject: [PATCH 100/528] small adjustments in append_child_relation() --- src/pg_pathman.c | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index b3058fe2..a45e6e4d 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -379,7 +379,7 @@ append_child_relation(PlannerInfo *root, *child_rel; Relation child_relation; AppendRelInfo *appinfo; - Index childRTindex; + Index child_rti; PlanRowMark *child_rowmark; Node *childqual; List *childquals; @@ -415,22 +415,20 @@ append_child_relation(PlannerInfo *root, child_rte = copyObject(parent_rte); child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - child_rte->requiredPerms = 0; /* perform all checks on parent */ - /* - * If it is the parent relation, then set inh flag to false to prevent - * further recursive unrolling. Else if relation is a child and has subclass - * then we will need to check if there are subpartitions - */ - child_rte->inh = (child_oid != parent_rte->relid) ? - child_relation->rd_rel->relhassubclass : false; + child_rte->requiredPerms = 0; /* perform all checks on parent */ + + /* Does this child have subpartitions? */ + child_rte->inh = (child_oid == parent_rte->relid) ? + false : /* it's a parent, skip */ + child_relation->rd_rel->relhassubclass; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); - childRTindex = list_length(root->parse->rtable); - root->simple_rte_array[childRTindex] = child_rte; + child_rti = list_length(root->parse->rtable); + root->simple_rte_array[child_rti] = child_rte; /* Create RelOptInfo for this child (and make some estimates as well) */ - child_rel = build_simple_rel_compat(root, childRTindex, parent_rel); + child_rel = build_simple_rel_compat(root, child_rti, parent_rel); /* Increase total_table_pages using the 'child_rel' */ root->total_table_pages += (double) child_rel->pages; @@ -441,7 +439,7 @@ append_child_relation(PlannerInfo *root, { child_rowmark = makeNode(PlanRowMark); - child_rowmark->rti = childRTindex; + child_rowmark->rti = child_rti; child_rowmark->prti = parent_rti; child_rowmark->rowmarkId = parent_rowmark->rowmarkId; /* Reselect rowmark type, because relkind might not match parent */ @@ -469,14 +467,14 @@ append_child_relation(PlannerInfo *root, /* Build an AppendRelInfo for this child */ appinfo = makeNode(AppendRelInfo); appinfo->parent_relid = parent_rti; - appinfo->child_relid = childRTindex; + appinfo->child_relid = child_rti; appinfo->parent_reloid = parent_rte->relid; /* Store table row types for wholerow references */ appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; - make_inh_translation_list(parent_relation, child_relation, childRTindex, + make_inh_translation_list(parent_relation, child_relation, child_rti, &appinfo->translated_vars); /* Now append 'appinfo' to 'root->append_rel_list' */ @@ -575,18 +573,18 @@ append_child_relation(PlannerInfo *root, /* Close child relations, but keep locks */ 
heap_close(child_relation, NoLock); - /* - * Recursively expand child partition if it has subpartitions - */ + /* Recursively expand child partition if it has subpartitions */ if (child_rte->inh) { + child_rte->inh = false; + pathman_rel_pathlist_hook(root, child_rel, - childRTindex, + child_rti, child_rte); } - return childRTindex; + return child_rti; } From c44284b8f40edbcd139ac5eb6dd490397ba573c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 17:54:21 +0300 Subject: [PATCH 101/528] fixes in scan_result_parts_storage(), imporve pathman_subpartitions test --- expected/pathman_subpartitions.out | 233 +++++++++++++++-------------- sql/pathman_subpartitions.sql | 122 ++++++++------- src/include/partition_filter.h | 6 +- src/partition_filter.c | 22 +-- 4 files changed, 202 insertions(+), 181 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index f4a2620d..e103b3d5 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -1,89 +1,90 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; /* Create two level partitioning structure */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('abc', 'a', 0, 100, 2); +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_hash_partitions('abc_1', 'a', 3); +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); create_hash_partitions ------------------------ 3 (1 row) -SELECT create_hash_partitions('abc_2', 'b', 2); +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); create_hash_partitions ------------------------ 2 (1 row) SELECT * FROM pathman_partition_list; - parent | partition | parttype | expr | range_min | range_max ---------+-----------+----------+------+-----------+----------- - abc | abc_1 | 2 | a | 0 | 100 - abc | abc_2 | 2 | a | 100 | 200 - abc_1 | abc_1_0 | 1 | a | | - abc_1 | abc_1_1 | 1 | a | | - abc_1 | abc_1_2 | 1 | a | | - abc_2 | abc_2_0 | 1 | b | | - abc_2 | abc_2_1 | 1 | b | | + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | (7 rows) -SELECT tableoid::regclass, * FROM abc; - tableoid | a | b -----------+-----+----- - abc_1_0 | 21 | 21 - abc_1_0 | 61 | 61 - abc_1_1 | 41 | 41 - abc_1_2 | 1 | 1 - abc_1_2 | 81 | 81 - abc_2_0 | 101 | 101 - abc_2_0 | 141 | 141 - abc_2_1 | 121 | 121 - abc_2_1 | 161 | 161 - abc_2_1 | 181 | 181 +SELECT tableoid::regclass, * FROM subpartitions.abc; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + 
subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 (10 rows) -/* Insert should result in creating of new subpartition */ -SELECT append_range_partition('abc', 'abc_3'); +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); append_range_partition ------------------------ - abc_3 + subpartitions.abc_3 (1 row) -SELECT create_range_partitions('abc_3', 'b', 200, 10, 2); +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; - parent | partition | parttype | expr | range_min | range_max ---------+-----------+----------+------+-----------+----------- - abc_3 | abc_3_1 | 2 | b | 200 | 210 - abc_3 | abc_3_2 | 2 | b | 210 | 220 +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 (2 rows) -INSERT INTO abc VALUES (215, 215); -SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; - parent | partition | parttype | expr | range_min | range_max ---------+-----------+----------+------+-----------+----------- - abc_3 | abc_3_1 | 2 | b | 200 | 210 - abc_3 | abc_3_2 | 2 | b | 210 | 220 +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 (2 rows) -SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; - tableoid | a | b -----------+-----+----- - abc_3_2 | 215 | 215 +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 (1 row) /* Pruning tests */ -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; QUERY PLAN --------------------------------- Append @@ -98,7 +99,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; Filter: (a < 150) (10 rows) -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; QUERY PLAN --------------------------------- Append @@ -117,7 +118,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; Filter: (b = 215) (14 rows) -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; QUERY PLAN ------------------------------------------------- Append @@ -126,7 +127,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; Filter: ((a = 215) AND (b = 215)) (4 rows) -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; QUERY PLAN ---------------------------------- Append @@ -136,7 +137,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc 
WHERE a >= 210 and b >= 210; (4 rows) /* Multilevel partitioning with update triggers */ -CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) RETURNS SETOF REGCLASS AS $$ DECLARE @@ -151,14 +152,14 @@ BEGIN FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR subpartition IN (SELECT partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition)) LOOP RETURN NEXT subpartition; END LOOP; END LOOP; END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) RETURNS SETOF TEXT AS $$ DECLARE @@ -172,124 +173,128 @@ BEGIN RETURN; END; $$ LANGUAGE plpgsql; -SELECT create_update_triggers('abc_1'); /* Cannot perform on partition */ +SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ ERROR: Parent table must have an update trigger -SELECT create_update_triggers('abc'); /* Only on parent */ +SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ create_update_triggers ------------------------ (1 row) -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; - p | get_triggers ----------+----------------------------------------------------------------------------------------------------------------------------- - abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; + p | get_triggers 
+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------- + subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (14 rows) -SELECT append_range_partition('abc', 'abc_4'); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); append_range_partition ------------------------ - abc_4 + subpartitions.abc_4 (1 row) -SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); create_hash_partitions ------------------------ 2 (1 row) - * be created on subpartitions */ -SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; - p | get_triggers ----------+----------------------------------------------------------------------------------------------------------------------------- - abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_1 FOR EACH 
ROW EXECUTE PROCEDURE pathman_update_trigger_func() +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; + p | get_triggers +-----------------------+------------------------------------------------------------------------------------------------------------------------------------------- + subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (4 rows) -SELECT drop_triggers('abc_1'); /* Cannot perform on partition */ +SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ ERROR: Parent table must not have an update trigger -SELECT drop_triggers('abc'); /* Only on parent */ +SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ drop_triggers --------------- (1 row) -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; p | get_triggers ---+-------------- (0 rows) -DROP TABLE abc CASCADE; +DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects -/* Test that update trigger words correclty */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -SELECT create_range_partitions('abc', 'a', 0, 100, 2); +/* Test that update trigger works correctly */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_update_triggers('abc'); +SELECT create_update_triggers('subpartitions.abc'); create_update_triggers ------------------------ (1 row) -INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ -SELECT tableoid::regclass, * FROM abc; - tableoid | a | b -----------+----+---- - abc_1_1 | 25 | 25 +INSERT INTO subpartitions.abc VALUES (25, 25); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 (1 row) -UPDATE abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ - tableoid | a | b -----------+-----+---- - abc_2_1 | 125 | 25 +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 (1 row) -UPDATE abc SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM abc; 
/* Should be in abc_2_2 */ - tableoid | a | b -----------+-----+---- - abc_2_2 | 125 | 75 +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 (1 row) -UPDATE abc SET b = 125 WHERE a = 125 and b = 75; -SELECT tableoid::regclass, * FROM abc; /* Should create partition abc_2_3 */ - tableoid | a | b -----------+-----+----- - abc_2_3 | 125 | 125 +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 (1 row) -DROP TABLE abc CASCADE; +DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 10 other objects +DROP SCHEMA subpartitions CASCADE; +NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 4cf5d1a1..6f8d035c 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -1,32 +1,37 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; + + /* Create two level partitioning structure */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('abc', 'a', 0, 100, 2); -SELECT create_hash_partitions('abc_1', 'a', 3); -SELECT create_hash_partitions('abc_2', 'b', 2); +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); SELECT * FROM pathman_partition_list; -SELECT tableoid::regclass, * FROM abc; +SELECT tableoid::regclass, * FROM subpartitions.abc; -/* Insert should result in creating of new subpartition */ -SELECT append_range_partition('abc', 'abc_3'); -SELECT create_range_partitions('abc_3', 'b', 200, 10, 2); -SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; -INSERT INTO abc VALUES (215, 215); -SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; -SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; /* Pruning tests */ -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM 
subpartitions.abc WHERE a >= 210 AND b >= 210; + + /* Multilevel partitioning with update triggers */ -CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) RETURNS SETOF REGCLASS AS $$ DECLARE @@ -41,7 +46,7 @@ BEGIN FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR subpartition IN (SELECT partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition)) LOOP RETURN NEXT subpartition; END LOOP; @@ -49,7 +54,7 @@ BEGIN END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) RETURNS SETOF TEXT AS $$ DECLARE @@ -64,36 +69,47 @@ BEGIN END; $$ LANGUAGE plpgsql; -SELECT create_update_triggers('abc_1'); /* Cannot perform on partition */ -SELECT create_update_triggers('abc'); /* Only on parent */ -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; - -SELECT append_range_partition('abc', 'abc_4'); -SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically - * be created on subpartitions */ -SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; -SELECT drop_triggers('abc_1'); /* Cannot perform on partition */ -SELECT drop_triggers('abc'); /* Only on parent */ -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ - -DROP TABLE abc CASCADE; - -/* Test that update trigger words correclty */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -SELECT create_range_partitions('abc', 'a', 0, 100, 2); -SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); -SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); -SELECT create_update_triggers('abc'); - -INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ -SELECT tableoid::regclass, * FROM abc; -UPDATE abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ -UPDATE abc SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_2 */ -UPDATE abc SET b = 125 WHERE a = 125 and b = 75; -SELECT tableoid::regclass, * FROM abc; /* Should create partition abc_2_3 */ - -DROP TABLE abc CASCADE; - -DROP EXTENSION pg_pathman; \ No newline at end of file +SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ +SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; + +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; + +SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ +SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; + +DROP TABLE subpartitions.abc CASCADE; + + + +/* Test that update trigger works correctly */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); +SELECT create_update_triggers('subpartitions.abc'); + 
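-- Editor's aside: an illustrative sketch, not part of the original patch. With
-- this two-level RANGE layout, a row is routed by 'a' at the first level and by
-- 'b' at the second, so the expected leaf for each statement below can be worked
-- out by hand:
--   (25, 25)   -> abc_1 (a in [0, 100))   -> abc_1_1 (b in [0, 50))
--   (125, 25)  -> abc_2 (a in [100, 200)) -> abc_2_1 (b in [0, 50))
--   (125, 75)  -> abc_2                   -> abc_2_2 (b in [50, 100))
--   (125, 125) -> abc_2                   -> abc_2_3 (auto-created for b in [100, 150))
-- Assuming pathman_partition_list exposes range_min/range_max (as used elsewhere
-- in these tests), the leaf bounds can be cross-checked with:
--   SELECT partition, range_min, range_max
--   FROM pathman_partition_list
--   WHERE parent = 'subpartitions.abc_2'::regclass;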
+INSERT INTO subpartitions.abc VALUES (25, 25); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ + +DROP TABLE subpartitions.abc CASCADE; + + + +DROP SCHEMA subpartitions CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 00294050..c20449ab 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -39,9 +39,9 @@ typedef struct { Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ - TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ - bool has_subpartitions; - ExprState *expr_state; /* if has_subpartitions true */ + TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + bool has_children; /* hint that it might have children */ + ExprState *expr_state; /* children have their own expressions */ } ResultRelInfoHolder; diff --git a/src/partition_filter.c b/src/partition_filter.c index a046cd2b..344f557a 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -327,8 +327,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); /* Are there subpartitions? */ - rri_holder->has_subpartitions = - (get_pathman_relation_info(partid) != NULL); + rri_holder->has_children = child_rel->rd_rel->relhassubclass; rri_holder->expr_state = NULL; /* Call on_new_rri_holder_callback() if needed */ @@ -481,25 +480,26 @@ select_partition_for_insert(ExprState *expr_state, elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)); } - /* If partition has subpartitions */ - else if (rri_holder->has_subpartitions) + /* This partition might have sub-partitions */ + else if (rri_holder->has_children) { - const PartRelationInfo *subprel; + const PartRelationInfo *sub_prel; /* Fetch PartRelationInfo for this partitioned relation */ - subprel = get_pathman_relation_info(partition_relid); - Assert(subprel != NULL); + sub_prel = get_pathman_relation_info(partition_relid); + + /* Might be a false alarm */ + if (!sub_prel) + break; /* Build an expression state if not yet */ if (!rri_holder->expr_state) - rri_holder->expr_state = prepare_expr_state(subprel, estate); - - Assert(rri_holder->expr_state != NULL); + rri_holder->expr_state = prepare_expr_state(sub_prel, estate); /* Recursively search for subpartitions */ rri_holder = select_partition_for_insert(rri_holder->expr_state, econtext, estate, - subprel, parts_storage); + sub_prel, parts_storage); } } /* Loop until we get some result */ From 8467f5a0a682b7fec83bfa34581aa45f13401698 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 18:02:28 +0300 Subject: [PATCH 102/528] make pathman_subpartitions more stable --- expected/pathman_subpartitions.out | 23 +++++++++++++---------- sql/pathman_subpartitions.sql | 13 ++++++++----- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/expected/pathman_subpartitions.out 
b/expected/pathman_subpartitions.out index e103b3d5..bf31a580 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -34,17 +34,17 @@ SELECT * FROM pathman_partition_list; subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | (7 rows) -SELECT tableoid::regclass, * FROM subpartitions.abc; +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; tableoid | a | b -----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 subpartitions.abc_1_0 | 21 | 21 - subpartitions.abc_1_0 | 61 | 61 subpartitions.abc_1_1 | 41 | 41 - subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 61 | 61 subpartitions.abc_1_2 | 81 | 81 subpartitions.abc_2_0 | 101 | 101 - subpartitions.abc_2_0 | 141 | 141 subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 subpartitions.abc_2_1 | 161 | 161 subpartitions.abc_2_1 | 181 | 181 (10 rows) @@ -77,7 +77,7 @@ SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regcl subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 (2 rows) -SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; tableoid | a | b -----------------------+-----+----- subpartitions.abc_3_2 | 215 | 215 @@ -182,17 +182,18 @@ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; p | get_triggers -----------------------+------------------------------------------------------------------------------------------------------------------------------------------- subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_1 | CREATE 
TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() @@ -214,7 +215,8 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); (1 row) SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p +ORDER BY p; p | get_triggers -----------------------+------------------------------------------------------------------------------------------------------------------------------------------- subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() @@ -232,7 +234,8 @@ SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; p | get_triggers ---+-------------- (0 rows) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 6f8d035c..b1a79874 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -12,7 +12,7 @@ SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); SELECT * FROM pathman_partition_list; -SELECT tableoid::regclass, * FROM subpartitions.abc; +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; /* Insert should result in creation of new subpartition */ SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); @@ -20,7 +20,7 @@ SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; INSERT INTO subpartitions.abc VALUES (215, 215); SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; -SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; /* Pruning tests */ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; @@ -72,17 +72,20 @@ $$ LANGUAGE plpgsql; SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p +ORDER BY p; SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; DROP TABLE subpartitions.abc CASCADE; From 
b96b32e20fe30cd44466fa360f70154b4a66d0ea Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 28 Jul 2017 13:48:27 +0300 Subject: [PATCH 103/528] Fix cmocka tests --- src/debug_print.c | 19 ++++++++++--------- tests/cmocka/Makefile | 1 + 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index 9734ca06..1a4ea417 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -105,10 +105,9 @@ irange_print(IndexRange irange) return str.data; } - -/* ---------------- - * printatt - * ---------------- +#ifndef CMOCKA_TESTS +/* + * Print attribute information */ static char * printatt(unsigned attributeId, @@ -127,9 +126,8 @@ printatt(unsigned attributeId, attributeP->attbyval ? 't' : 'f'); } -/* ---------------- - * debugtup - print one tuple for an interactive backend - * ---------------- +/* + * Print one tuple for an interactive backend */ static char * debugtup(TupleTableSlot *slot) @@ -170,6 +168,9 @@ debugtup(TupleTableSlot *slot) return result; } +/* + * Print contents of tuple slot + */ #ifdef __GNUC__ __attribute__((unused)) #endif @@ -186,8 +187,7 @@ slot_print(TupleTableSlot *slot) } /* - * rt_print - * return contents of range table + * Print contents of range table */ #ifdef __GNUC__ __attribute__((unused)) @@ -251,3 +251,4 @@ rt_print(const List *rtable) return str.data; #undef APPEND_STR } +#endif diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index e31e6d95..f79e2637 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,6 +8,7 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) +CFLAGS += -DCMOCKA_TESTS LDFLAGS += -lcmocka TEST_BIN = rangeset_tests From 41a5286cde5167cee8a370b749293e841975e98a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 28 Jul 2017 14:26:34 +0300 Subject: [PATCH 104/528] simplify fini_result_parts_storage() --- src/partition_filter.c | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index f0edf76d..aa07f4ad 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -182,38 +182,21 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) HASH_SEQ_STATUS stat; ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ - /* Close partitions and free free conversion-related stuff */ - if (close_rels) + hash_seq_init(&stat, parts_storage->result_rels_table); + while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + /* Close partitions and indices */ + if (close_rels) { ExecCloseIndices(rri_holder->result_rel_info); heap_close(rri_holder->result_rel_info->ri_RelationDesc, parts_storage->heap_close_lock_mode); - - /* Skip if there's no map */ - if (!rri_holder->tuple_map) - continue; - - FreeTupleDesc(rri_holder->tuple_map->indesc); - FreeTupleDesc(rri_holder->tuple_map->outdesc); - - free_conversion_map(rri_holder->tuple_map); } - } - /* Else just free conversion-related stuff */ - else - { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + /* Free conversion-related stuff */ + if (rri_holder->tuple_map) { - /* Skip if there's no map */ - if (!rri_holder->tuple_map) - continue; - FreeTupleDesc(rri_holder->tuple_map->indesc); 
FreeTupleDesc(rri_holder->tuple_map->outdesc); From 16eff6b0b3dd5b05f52189cbd588001a80910c55 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 28 Jul 2017 14:48:47 +0300 Subject: [PATCH 105/528] use PG_REGRESS_DIFF_OPTS --- run_tests.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/run_tests.sh b/run_tests.sh index 1b9d7a70..6622ae39 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -66,6 +66,7 @@ status=$? if [ $status -ne 0 ]; then cat /tmp/postgres.log; fi # run regression tests +export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) PGPORT=55435 make USE_PGXS=1 installcheck || status=$? # show diff if it exists From 9b36a4c3c8912abbcf7f889046c44171b359a9ad Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 28 Jul 2017 15:03:18 +0300 Subject: [PATCH 106/528] make trigger listing in pathman_subpartitions more stable --- expected/pathman_subpartitions.out | 28 ++++++++++++++-------------- sql/pathman_subpartitions.sql | 12 ++++++------ 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index bf31a580..e6214f10 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -181,23 +181,23 @@ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers +ORDER BY p, trig; + p | trig -----------------------+------------------------------------------------------------------------------------------------------------------------------------------- subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH ROW 
EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (14 rows) @@ -214,13 +214,13 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); 2 (1 row) -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; - p | get_triggers +ORDER BY p, trig; + p | trig -----------------------+------------------------------------------------------------------------------------------------------------------------------------------- - subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (4 rows) @@ -233,11 +233,11 @@ SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers ----+-------------- +ORDER BY p, trig; + p | trig +---+------ (0 rows) DROP TABLE subpartitions.abc CASCADE; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index b1a79874..00ae6a4a 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -71,21 +71,21 @@ $$ LANGUAGE plpgsql; SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; +ORDER BY p, trig; SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; +ORDER BY p, trig; SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT drop_triggers('subpartitions.abc'); /* Can perform on 
parent */ -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; +ORDER BY p, trig; DROP TABLE subpartitions.abc CASCADE; From 503f444435f5d8eb5be103ffeedd2d6b4de47d56 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 15:48:23 +0300 Subject: [PATCH 107/528] Fix trigger related tests (WIP) --- expected/pathman_basic.out | 52 +----------------- expected/pathman_calamity.out | 84 ---------------------------- expected/pathman_expressions.out | 44 --------------- sql/pathman_basic.sql | 7 --- sql/pathman_calamity.sql | 30 ---------- sql/pathman_expressions.sql | 12 ---- src/hooks.c | 3 - src/include/partition_filter.h | 5 +- src/include/partition_update.h | 10 ++-- src/partition_filter.c | 23 ++------ src/partition_update.c | 94 ++++++++++++++++---------------- 11 files changed, 61 insertions(+), 303 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index c19e75ca..0a46ba3e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1087,7 +1087,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A (6 rows) SELECT pathman.detach_range_partition('test.range_rel_archive'); -NOTICE: trigger "range_rel_upd_trig" for relation "test.range_rel_archive" does not exist, skipping detach_range_partition ------------------------ test.range_rel_archive @@ -1227,7 +1226,6 @@ SELECT * FROM test.hash_rel WHERE id = 123; /* Test replacing hash partition */ CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); -NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_0" does not exist, skipping replace_hash_partition ------------------------ test.hash_rel_extern @@ -1281,7 +1279,6 @@ CREATE TABLE test.hash_rel_wrong( id INTEGER NOT NULL, value INTEGER); SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); -NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_1" does not exist, skipping ERROR: column "value" in child table must be marked NOT NULL EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; QUERY PLAN @@ -1498,55 +1495,8 @@ SELECT * FROM test."TeSt"; 1 | 1 (3 rows) -SELECT pathman.create_update_triggers('test."TeSt"'); - create_update_triggers ------------------------- - -(1 row) - -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -SELECT * FROM test."TeSt" WHERE a = 1; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; - QUERY PLAN ----------------------------- - Append - -> Seq Scan on "TeSt_2" - Filter: (a = 1) -(3 rows) - -SELECT pathman.drop_partitions('test."TeSt"'); -NOTICE: 0 rows copied from test."TeSt_0" -NOTICE: 0 rows copied from test."TeSt_1" -NOTICE: 3 rows copied from test."TeSt_2" - drop_partitions ------------------ - 3 -(1 row) - -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects CREATE TABLE test."RangeRel" ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 251ec31c..a75e4518 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -398,36 +398,6 @@ SELECT build_check_constraint_name(NULL) IS NULL; t (1 row) -/* check function 
build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); /* OK */ - build_update_trigger_name ---------------------------- - part_test_upd_trig -(1 row) - -SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist -SELECT build_update_trigger_name(NULL) IS NULL; - ?column? ----------- - t -(1 row) - -/* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ - build_update_trigger_func_name ----------------------------------- - calamity.part_test_upd_trig_func -(1 row) - -SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist -SELECT build_update_trigger_func_name(NULL) IS NULL; - ?column? ----------- - t -(1 row) - /* check function build_sequence_name() */ SELECT build_sequence_name('calamity.part_test'); /* OK */ build_sequence_name @@ -512,14 +482,6 @@ WARNING: table "pg_class" is not partitioned (1 row) -SELECT has_update_trigger(NULL); - has_update_trigger --------------------- - -(1 row) - -SELECT has_update_trigger(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin @@ -806,52 +768,6 @@ SELECT merge_range_partitions('{calamity.merge_test_a_1, ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -/* check function drop_triggers() */ -CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); -SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); - create_hash_partitions ------------------------- - 2 -(1 row) - -SELECT create_update_triggers('calamity.trig_test_tbl'); - create_update_triggers ------------------------- - -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; - count -------- - 1 -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - count -------- - 1 -(1 row) - -SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ - drop_triggers ---------------- - -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - count -------- - 0 -(1 row) - -DROP TABLE calamity.trig_test_tbl CASCADE; -NOTICE: drop cascades to 2 other objects DROP SCHEMA calamity CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 134fcae9..463ad584 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -425,50 +425,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -SELECT create_update_triggers('test_exprs.range_rel'); - create_update_triggers ------------------------- - -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel; - count -------- - 65 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_1; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_2; - count -------- - 12 -(1 row) - -UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; -/* counts in partitions should be changed */ -SELECT COUNT(*) FROM 
test_exprs.range_rel; - count -------- - 65 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_1; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_2; - count -------- - 24 -(1 row) - DROP SCHEMA test_exprs CASCADE; NOTICE: drop cascades to 24 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index d34285e5..85dd076b 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -455,13 +455,6 @@ INSERT INTO test."TeSt" VALUES (1, 1); INSERT INTO test."TeSt" VALUES (2, 2); INSERT INTO test."TeSt" VALUES (3, 3); SELECT * FROM test."TeSt"; -SELECT pathman.create_update_triggers('test."TeSt"'); -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; -SELECT * FROM test."TeSt" WHERE a = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; -SELECT pathman.drop_partitions('test."TeSt"'); -SELECT * FROM test."TeSt"; DROP TABLE test."TeSt" CASCADE; CREATE TABLE test."RangeRel" ( diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..90c700a9 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -179,16 +179,6 @@ SELECT build_check_constraint_name('calamity.part_test'); /* OK */ SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ SELECT build_check_constraint_name(NULL) IS NULL; -/* check function build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); /* OK */ -SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ -SELECT build_update_trigger_name(NULL) IS NULL; - -/* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ -SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ -SELECT build_update_trigger_func_name(NULL) IS NULL; - /* check function build_sequence_name() */ SELECT build_sequence_name('calamity.part_test'); /* OK */ SELECT build_sequence_name(1::REGCLASS); /* not ok */ @@ -222,9 +212,6 @@ SELECT generate_range_bounds('1-jan-2017'::DATE, SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ -SELECT has_update_trigger(NULL); -SELECT has_update_trigger(0::REGCLASS); /* not ok */ - /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin @@ -347,23 +334,6 @@ SELECT merge_range_partitions('{calamity.merge_test_a_1, DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; - -/* check function drop_triggers() */ -CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); -SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); -SELECT create_update_triggers('calamity.trig_test_tbl'); - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - -SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - -DROP TABLE calamity.trig_test_tbl CASCADE; - - DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 1c7f4dbe..46bceafb 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -168,17 +168,5 @@ INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('as SELECT 
COUNT(*) FROM test_exprs.range_rel_6; EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; -SELECT create_update_triggers('test_exprs.range_rel'); -SELECT COUNT(*) FROM test_exprs.range_rel; -SELECT COUNT(*) FROM test_exprs.range_rel_1; -SELECT COUNT(*) FROM test_exprs.range_rel_2; -UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; - -/* counts in partitions should be changed */ -SELECT COUNT(*) FROM test_exprs.range_rel; -SELECT COUNT(*) FROM test_exprs.range_rel_1; -SELECT COUNT(*) FROM test_exprs.range_rel_2; - - DROP SCHEMA test_exprs CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index d732a317..f0714288 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -882,10 +882,7 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, * We unset junkfilter to disable junk cleaning in * ExecModifyTable. We don't need junk cleaning because * there is possible modification of tuple in `partition_filter_exec` - * Same time we need this junkfilter in PartitionFilter - * nodes, so we save it in node. */ - cstate->saved_junkFilter = cstate->resultRelInfo->ri_junkFilter; cstate->resultRelInfo->ri_junkFilter = NULL; /* hack, change UPDATE operation to INSERT */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 42344abc..ddb5b72f 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,8 +40,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ - JunkFilter *src_junkFilter; /* we keep junkfilter from scanned - ResultRelInfo here */ + JunkFilter *junkfilter; /* junkfilter for cached ResultRelInfo */ bool has_children; /* hint that it might have children */ ExprState *expr_state; /* children have their own expressions */ } ResultRelInfoHolder; @@ -102,7 +101,7 @@ typedef struct CmdType command_type; TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ - JunkFilter *src_junkFilter; /* junkfilter for subplan_slot */ + JunkFilter *junkfilter; /* junkfilter for subplan_slot */ ExprContext *tup_convert_econtext; /* ExprContext for projections */ ExprState *expr_state; /* for partitioning expression */ diff --git a/src/include/partition_update.h b/src/include/partition_update.h index b82ec61a..01405b26 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -26,12 +26,12 @@ typedef struct PartitionUpdateState { - CustomScanState css; + CustomScanState css; - Oid partitioned_table; - ResultRelInfo *resultRelInfo; - JunkFilter *saved_junkFilter; - Plan *subplan; /* proxy variable to store subplan */ + Oid partitioned_table; + ResultRelInfo *resultRelInfo; + JunkFilter *junkfilter; + Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; extern bool pg_pathman_enable_partition_update; diff --git a/src/partition_filter.c b/src/partition_filter.c index 80442203..7f34104e 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -312,7 +312,11 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Copy necessary fields from saved ResultRelInfo */ CopyToResultRelInfo(ri_WithCheckOptions); CopyToResultRelInfo(ri_WithCheckOptionExprs); - CopyToResultRelInfo(ri_junkFilter); + if (parts_storage->command_type != CMD_UPDATE) + CopyToResultRelInfo(ri_junkFilter); + else + 
child_result_rel_info->ri_junkFilter = NULL; + CopyToResultRelInfo(ri_projectReturning); CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); @@ -323,18 +327,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - rri_holder->src_junkFilter = NULL; - - if (parts_storage->command_type == CMD_UPDATE) - { - JunkFilter *junkfilter = parts_storage->saved_rel_info->ri_junkFilter; - - /* we don't need junk cleaning in ExecModifyTable */ - child_result_rel_info->ri_junkFilter = NULL; - - /* instead we do junk filtering ourselves */ - rri_holder->src_junkFilter = junkfilter; - } /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); @@ -691,6 +683,7 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) attr_map = build_attributes_map(prel, child_rel, &natts); expr = map_variable_attnos(expr, parent_varno, 0, attr_map, natts, &found_whole_row); + Assert(!found_whole_row); heap_close(child_rel, NoLock); } @@ -722,7 +715,6 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); state->subplan_slot = slot; - state->src_junkFilter = NULL; /* Save original ResultRelInfo */ saved_resultRelInfo = estate->es_result_relation_info; @@ -774,9 +766,6 @@ partition_filter_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = resultRelInfo; - /* pass junkfilter to upper node */ - state->src_junkFilter = rri_holder->src_junkFilter; - /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { diff --git a/src/partition_update.c b/src/partition_update.c index 5d9e8dc4..93d44851 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -123,29 +123,19 @@ partition_update_exec(CustomScanState *node) TupleTableSlot *slot; PartitionUpdateState *state = (PartitionUpdateState *) node; - /* - * Restore junkfilter in base resultRelInfo, - * we do it because child's RelResultInfo expects its existence - * for proper initialization. 
- * Also we set jf_junkAttNo there, because - * it wasn't set in ModifyTable node initialization - */ - state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; - /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) { - Datum datum; - char relkind; - ResultRelInfo *resultRelInfo, - *sourceRelInfo; - ItemPointer tupleid = NULL; - ItemPointerData tuple_ctid; - EPQState epqstate; - PartitionFilterState *child_state; - JunkFilter *junkfilter; + Datum datum; + ResultRelInfo *resultRelInfo, + *sourceRelInfo; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + EPQState epqstate; + PartitionFilterState *child_state; + char relkind; child_state = (PartitionFilterState *) child_ps; Assert(child_state->command_type == CMD_UPDATE); @@ -154,41 +144,51 @@ partition_update_exec(CustomScanState *node) sourceRelInfo = child_state->result_parts.saved_rel_info; resultRelInfo = estate->es_result_relation_info; - junkfilter = child_state->src_junkFilter; - if (junkfilter != NULL) + /* we generate junkfilter, if it wasn't created before */ + if (state->junkfilter == NULL) + { + state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, + sourceRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecInitExtraTupleSlot(estate)); + + state->junkfilter->jf_junkAttNo = ExecFindJunkAttribute(state->junkfilter, "ctid"); + if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + + relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) { - relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) - { - bool isNull; - - datum = ExecGetJunkAttribute(child_state->subplan_slot, - junkfilter->jf_junkAttNo, &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ - tupleid = &tuple_ctid; - } - else if (relkind == RELKIND_FOREIGN_TABLE) - elog(ERROR, "update node is not supported for foreign tables"); - else - elog(ERROR, "got unexpected type of relation for update"); - - /* - * Clean from junk attributes before INSERT, - * but only if slot wasn't converted in PartitionFilter - */ - if (TupIsNull(child_state->tup_convert_slot)) - slot = ExecFilterJunk(junkfilter, slot); + bool isNull; + + datum = ExecGetJunkAttribute(child_state->subplan_slot, + state->junkfilter->jf_junkAttNo, &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! 
*/ + tupleid = &tuple_ctid; } + else if (relkind == RELKIND_FOREIGN_TABLE) + elog(ERROR, "update node is not supported for foreign tables"); + else + elog(ERROR, "got unexpected type of relation for update"); + + /* + * Clean from junk attributes before INSERT, + * but only if slot wasn't converted in PartitionFilter + */ + if (TupIsNull(child_state->tup_convert_slot)) + slot = ExecFilterJunk(state->junkfilter, slot); /* Delete old tuple */ estate->es_result_relation_info = sourceRelInfo; + + Assert(tupleid != NULL); ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); /* we've got the slot that can be inserted to child partition */ From 37f618ee14876c9299408ef0f38c160d4717008b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 16:53:43 +0300 Subject: [PATCH 108/528] Fix update nodes --- src/partition_filter.c | 68 ++++++++++++++++-------------------------- 1 file changed, 26 insertions(+), 42 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 7f34104e..884fbb12 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -625,70 +625,54 @@ void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; - const PartRelationInfo *prel; - Node *expr; - Index parent_varno = 1; - ListCell *lc; PlanState *child_state; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); node->custom_ps = list_make1(child_state); - if (state->command_type == CMD_UPDATE) - parent_varno = ((Scan *) child_state->plan)->scanrelid; - else - { - Index varno = 1; - - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - - if (entry->relid == state->partitioned_table) - break; - - varno++; - } - - parent_varno = varno; - Assert(parent_varno <= list_length(estate->es_range_table)); - } - - if (state->expr_state == NULL) { /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); Assert(prel != NULL); - /* Change varno in expression Vars according to range table */ - Assert(parent_varno >= 1); - expr = PrelExpressionForRelid(prel, parent_varno); - - /* - * Also in updates we would operate with child relation, but - * expression expects varattnos like in base relation, so we map - * parent varattnos to child varattnos - */ + /* Prepare state for expression execution */ if (state->command_type == CMD_UPDATE) { - int natts; - bool found_whole_row; - AttrNumber *attr_map; - Oid child_relid = getrelid(parent_varno, estate->es_range_table); + int natts; + bool found_whole_row; + AttrNumber *attr_map; + MemoryContext old_mcxt; + + /* + * In UPDATE queries we would operate with child relation, but + * expression expects varattnos like in base relation, so we map + * parent varattnos to child varattnos + */ + + Index relno = ((Scan *) child_state->plan)->scanrelid; + Node *expr = PrelExpressionForRelid(prel, relno); + Oid child_relid = getrelid(relno, estate->es_range_table); Relation child_rel = heap_open(child_relid, NoLock); attr_map = build_attributes_map(prel, child_rel, &natts); - expr = map_variable_attnos(expr, parent_varno, 0, attr_map, natts, + expr = map_variable_attnos(expr, relno, 0, attr_map, natts, &found_whole_row); Assert(!found_whole_row); heap_close(child_rel, NoLock); - } - /* Prepare state for expression execution */ - state->expr_state = prepare_expr_state(prel, estate); + /* Prepare state for expression execution */ + 
old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + state->expr_state = ExecInitExpr((Expr *) expr, NULL); + MemoryContextSwitchTo(old_mcxt); + } + else + { + /* simple INSERT, expression based on parent attribute numbers */ + state->expr_state = prepare_expr_state(prel, estate); + } } /* Init ResultRelInfo cache */ From e8c708ca8edad5f63c073f9cbdc537b890fc9744 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 17:01:25 +0300 Subject: [PATCH 109/528] Remove unused attributes from update node --- src/hooks.c | 10 +++------- src/include/partition_update.h | 1 - src/partition_filter.c | 18 +++++++++--------- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index f0714288..63808297 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -873,17 +873,13 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) { - PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; - - /* Save parent resultRelInfo in PartitionUpdate node */ - cstate->resultRelInfo = mt_state->resultRelInfo + i; + ResultRelInfo *rri = mt_state->resultRelInfo + i; /* * We unset junkfilter to disable junk cleaning in - * ExecModifyTable. We don't need junk cleaning because - * there is possible modification of tuple in `partition_filter_exec` + * ExecModifyTable. */ - cstate->resultRelInfo->ri_junkFilter = NULL; + rri->ri_junkFilter = NULL; /* hack, change UPDATE operation to INSERT */ mt_state->operation = CMD_INSERT; diff --git a/src/include/partition_update.h b/src/include/partition_update.h index 01405b26..30e5e329 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -29,7 +29,6 @@ typedef struct PartitionUpdateState CustomScanState css; Oid partitioned_table; - ResultRelInfo *resultRelInfo; JunkFilter *junkfilter; Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 884fbb12..d1ba303d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -641,21 +641,21 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) /* Prepare state for expression execution */ if (state->command_type == CMD_UPDATE) { - int natts; - bool found_whole_row; - AttrNumber *attr_map; - MemoryContext old_mcxt; - /* * In UPDATE queries we would operate with child relation, but * expression expects varattnos like in base relation, so we map * parent varattnos to child varattnos */ - Index relno = ((Scan *) child_state->plan)->scanrelid; - Node *expr = PrelExpressionForRelid(prel, relno); - Oid child_relid = getrelid(relno, estate->es_range_table); - Relation child_rel = heap_open(child_relid, NoLock); + int natts; + bool found_whole_row; + + AttrNumber *attr_map; + MemoryContext old_mcxt; + Index relno = ((Scan *) child_state->plan)->scanrelid; + Node *expr = PrelExpressionForRelid(prel, relno); + Relation child_rel = heap_open( + getrelid(relno, estate->es_range_table), NoLock); attr_map = build_attributes_map(prel, child_rel, &natts); expr = map_variable_attnos(expr, relno, 0, attr_map, natts, From efa1b4867a80cf97940ed1995044c51644f699ac Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 19:10:49 +0300 Subject: [PATCH 110/528] Start fixing update node for subpartitions (WIP) --- expected/pathman_subpartitions.out | 84 ++---------------------------- sql/pathman_subpartitions.sql | 65 ++++++----------------- 2 files changed, 20 
insertions(+), 129 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index bf31a580..659185bf 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -136,7 +136,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; Filter: (a >= 210) (4 rows) -/* Multilevel partitioning with update triggers */ +/* Multilevel partitioning with updates */ CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) RETURNS SETOF REGCLASS AS $$ @@ -159,49 +159,6 @@ BEGIN END LOOP; END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) -RETURNS SETOF TEXT AS -$$ -DECLARE - def TEXT; -BEGIN - FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) - LOOP - RETURN NEXT def; - END LOOP; - - RETURN; -END; -$$ LANGUAGE plpgsql; -SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -ERROR: Parent table must have an update trigger -SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ - create_update_triggers ------------------------- - -(1 row) - -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers ------------------------+------------------------------------------------------------------------------------------------------------------------------------------- - subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - 
subpartitions.abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() -(14 rows) - SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); append_range_partition ------------------------ @@ -214,35 +171,10 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); 2 (1 row) -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; - p | get_triggers ------------------------+------------------------------------------------------------------------------------------------------------------------------------------- - subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() -(4 rows) - -SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -ERROR: Parent table must not have an update trigger -SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ - drop_triggers ---------------- - -(1 row) - -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers ----+-------------- -(0 rows) - DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects -/* Test that update trigger works correctly */ +/* Test that update works correctly */ +SET pg_pathman.enable_partitionupdate=on; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions @@ -262,12 +194,6 @@ SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); 2 (1 row) -SELECT create_update_triggers('subpartitions.abc'); - create_update_triggers ------------------------- - -(1 row) - INSERT INTO subpartitions.abc VALUES (25, 25); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ tableoid | a | b @@ -297,7 +223,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti (1 row) DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 10 other objects +NOTICE: drop cascades to 9 other objects DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to function subpartitions.partitions_tree(regclass) DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index b1a79874..06cd5603 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -3,8 +3,6 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; - - /* Create two level partitioning structure */ CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; @@ -28,75 +26,45 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; 
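-- Editor's aside: an illustrative sketch, not part of the original patch.
-- Pruning proceeds level by level: the predicate on 'a' narrows the first-level
-- RANGE partitions, and the predicate on 'b' then narrows each surviving
-- subtree. The combined condition above should therefore reach a single leaf:
--   EXPLAIN (COSTS OFF)
--   SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215;
-- is expected to scan only subpartitions.abc_3_2, whose bounds are
-- a in [200, 300) (inherited from abc_3) and b in [210, 220).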
EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; - - -/* Multilevel partitioning with update triggers */ -CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) -RETURNS SETOF REGCLASS AS +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS $$ DECLARE - partition REGCLASS; - subpartition REGCLASS; + partition REGCLASS; + subpartition TEXT; BEGIN IF rel IS NULL THEN RETURN; END IF; - RETURN NEXT rel; + RETURN NEXT rel::TEXT; FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR subpartition IN (SELECT subpartitions.partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) LOOP - RETURN NEXT subpartition; + RETURN NEXT level || subpartition::TEXT; END LOOP; END LOOP; END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) -RETURNS SETOF TEXT AS -$$ -DECLARE - def TEXT; -BEGIN - FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) - LOOP - RETURN NEXT def; - END LOOP; - - RETURN; -END; -$$ LANGUAGE plpgsql; - -SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; - -SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - +SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; - - -/* Test that update trigger works correctly */ +\q +/* Test that update works correctly */ +SET pg_pathman.enable_partitionupdate=on; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); -SELECT create_update_triggers('subpartitions.abc'); INSERT INTO subpartitions.abc VALUES (25, 25); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ @@ -111,8 +79,5 @@ UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ DROP TABLE subpartitions.abc CASCADE; - - - DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; From 6a346b7daac0a262e70c778828a0f68b3123807c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 1 Aug 2017 10:58:25 +0300 Subject: [PATCH 111/528] Fix update on subpartitions --- expected/pathman_subpartitions.out | 40 +++++++++++++++++++++++------- sql/pathman_subpartitions.sql | 1 - src/planner_tree_modification.c | 22 ++++++---------- 3 files changed, 38 insertions(+), 25 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 
659185bf..af35011e 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -137,24 +137,27 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; (4 rows) /* Multilevel partitioning with updates */ -CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) -RETURNS SETOF REGCLASS AS +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS $$ DECLARE - partition REGCLASS; - subpartition REGCLASS; + partition REGCLASS; + subpartition TEXT; BEGIN IF rel IS NULL THEN RETURN; END IF; - RETURN NEXT rel; + RETURN NEXT rel::TEXT; FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR subpartition IN (SELECT subpartitions.partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) LOOP - RETURN NEXT subpartition; + RETURN NEXT level || subpartition::TEXT; END LOOP; END LOOP; END @@ -171,6 +174,25 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); 2 (1 row) +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects /* Test that update works correctly */ @@ -223,7 +245,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti (1 row) DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 9 other objects +NOTICE: drop cascades to 10 other objects DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to function subpartitions.partitions_tree(regclass) +NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 06cd5603..23217872 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -58,7 +58,6 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; -\q /* Test that update works correctly */ SET pg_pathman.enable_partitionupdate=on; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 8d44ded2..dc72bcb2 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -539,25 +539,17 @@ partition_update_visitor(Plan *plan, void *context) lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { - Oid parent_relid; Index rindex = lfirst_int(lc2); - Oid relid = getrelid(rindex, rtable); - const PartRelationInfo *prel = get_pathman_relation_info(relid); + Oid tmp_relid, + relid = getrelid(rindex, rtable); + const PartRelationInfo *prel; - /* query can be changed earlier to point on child partition, - * so we're possibly now looking at query that updates child partition - */ - if (prel == NULL) - { - parent_relid = get_parent_of_partition(relid, NULL); - if (parent_relid) - { - prel = 
get_pathman_relation_info(parent_relid);
-			relid = parent_relid;
-		}
-	}
+		while ((tmp_relid = get_parent_of_partition(relid, NULL)) != 0)
+			relid = tmp_relid;
 
 		/* Check that table is partitioned */
+		prel = get_pathman_relation_info(relid);
+
 		if (prel)
 		{
 			List	   *returning_list = NIL;

From c58238b7e3c913f0c3e81bb94a4930a6c2a7ecd0 Mon Sep 17 00:00:00 2001
From: Ildus K
Date: Tue, 1 Aug 2017 12:12:11 +0300
Subject: [PATCH 112/528] Change name of update node

---
 expected/pathman_update_node.out  | 4 ++--
 src/include/partition_update.h    | 2 +-
 tests/python/partitioning_test.py | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out
index a6214a52..254b301e 100644
--- a/expected/pathman_update_node.out
+++ b/expected/pathman_update_node.out
@@ -18,7 +18,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1
                             QUERY PLAN
-------------------------------------------------------------------
 Update on test_range_2
-   ->  Custom Scan (PrepareInsert)
+   ->  Custom Scan (PartitionRoute)
         ->  Custom Scan (PartitionFilter)
               ->  Bitmap Heap Scan on test_range_2
                     Recheck Cond: (val = '15'::numeric)
@@ -31,7 +31,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val =
                             QUERY PLAN
-------------------------------------------------------------------
 Update on test_range_2
-   ->  Custom Scan (PrepareInsert)
+   ->  Custom Scan (PartitionRoute)
         ->  Custom Scan (PartitionFilter)
               ->  Bitmap Heap Scan on test_range_2
                     Recheck Cond: (val = '15'::numeric)
diff --git a/src/include/partition_update.h b/src/include/partition_update.h
index 30e5e329..c2bd6926 100644
--- a/src/include/partition_update.h
+++ b/src/include/partition_update.h
@@ -22,7 +22,7 @@
 #include "nodes/extensible.h"
 #endif
 
-#define UPDATE_NODE_DESCRIPTION ("PrepareInsert")
+#define UPDATE_NODE_DESCRIPTION ("PartitionRoute")
 
 typedef struct PartitionUpdateState
 {
diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py
index 6ec2f5cb..65892fbd 100755
--- a/tests/python/partitioning_test.py
+++ b/tests/python/partitioning_test.py
@@ -1309,7 +1309,7 @@ def test_update_node_plan1(self):
             ],
             "Node Type": "Custom Scan",
             "Parent Relationship": "Member",
-            "Custom Plan Provider": "PrepareInsert"
+            "Custom Plan Provider": "PartitionRoute"
         }
         '''
         for i, f in enumerate([''] + list(map(str, range(1, 10)))):

From eb8b19ff14e1e9072d0f17a0c76cbcd911f7abb5 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 7 Aug 2017 12:59:35 +0300
Subject: [PATCH 113/528] replace get_pathman_lib_version() with pathman_version(), new docs

---
 README.md                     |  7 +++
 expected/pathman_calamity.out |  8 ++--
 init.sql                      |  4 +-
 sql/pathman_calamity.sql      |  2 +-
 src/include/init.h            |  8 ++--
 src/init.c                    | 84 ++++++++++++++++++++++++++---------
 src/pl_funcs.c                |  6 +--
 7 files changed, 85 insertions(+), 34 deletions(-)

diff --git a/README.md b/README.md
index 3b37273f..c075c2a8 100644
--- a/README.md
+++ b/README.md
@@ -102,6 +102,13 @@ SET pg_pathman.enable = t;
 
 ## Available functions
 
+### Module's version
+
+```plpgsql
+pathman_version()
+```
+Although it's possible to get major and minor version numbers using `\dx pg_pathman`, it doesn't show the actual [patch number](http://semver.org/). This function returns a complete version number of the loaded pg_pathman module in `MAJOR.MINOR.PATCH` format.
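A quick illustration of the renamed function; the output shown is for the 1.5.0 sources in this patch set (it matches the pathman_calamity expected output in this same commit):

```plpgsql
SELECT pathman_version();
 pathman_version
-----------------
 1.5.0
(1 row)
```

For compatibility checks the C side still compares versions numerically: the new build_semver_uint32() (see the init.c hunk below) gives each dot-separated component two decimal digits, so "1.5.0" packs to 1 * 100 * 100 + 5 * 100 + 0 = 10500. Incidentally, that is the same "10500" the old get_pathman_lib_version() printed, since 0x010500 rendered with %x also reads "10500".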
+ ### Partition creation ```plpgsql create_hash_partitions(relation REGCLASS, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 251ec31c..aceab6e8 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -9,10 +9,10 @@ SELECT debug_capture(); (1 row) -SELECT get_pathman_lib_version(); - get_pathman_lib_version -------------------------- - 10500 +SELECT pathman_version(); + pathman_version +----------------- + 1.5.0 (1 row) set client_min_messages = NOTICE; diff --git a/init.sql b/init.sql index 181a81a7..12abdf78 100644 --- a/init.sql +++ b/init.sql @@ -960,6 +960,6 @@ CREATE OR REPLACE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() -RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +CREATE OR REPLACE FUNCTION @extschema@.pathman_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..79e8c79c 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -7,7 +7,7 @@ CREATE SCHEMA calamity; /* call for coverage test */ set client_min_messages = ERROR; SELECT debug_capture(); -SELECT get_pathman_lib_version(); +SELECT pathman_version(); set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 3f1790ce..928052a4 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -153,11 +153,11 @@ simpify_mcxt_name(MemoryContext mcxt) #define DEFAULT_PATHMAN_OVERRIDE_COPY true -/* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ -#define LOWEST_COMPATIBLE_FRONT 0x010500 +/* Lowest version of Pl/PgSQL frontend compatible with internals */ +#define LOWEST_COMPATIBLE_FRONT "1.5.0" -/* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010500 +/* Current version of native C library */ +#define CURRENT_LIB_VERSION "1.5.0" void *pathman_cache_search_relid(HTAB *cache_table, diff --git a/src/init.c b/src/init.c index e1a1b5bf..7b0cdda0 100644 --- a/src/init.c +++ b/src/init.c @@ -38,6 +38,8 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#include + /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; @@ -92,9 +94,10 @@ static bool read_opexpr_const(const OpExpr *opexpr, /* Validate SQL facade */ -static uint32 build_sql_facade_version(char *version_cstr); -static uint32 get_sql_facade_version(void); -static void validate_sql_facade_version(uint32 ver); +static uint32 build_semver_uint32(char *version_cstr); +static uint32 get_plpgsql_frontend_version(void); +static void validate_plpgsql_frontend_version(uint32 current_ver, + uint32 compatible_ver); /* @@ -206,7 +209,8 @@ load_config(void) return false; /* remain 'uninitialized', exit before creating main caches */ /* Validate pg_pathman's Pl/PgSQL facade (might be outdated) */ - validate_sql_facade_version(get_sql_facade_version()); + validate_plpgsql_frontend_version(get_plpgsql_frontend_version(), + build_semver_uint32(LOWEST_COMPATIBLE_FRONT)); /* Create various hash tables (caches) */ init_local_cache(); @@ -1196,27 +1200,66 @@ validate_hash_constraint(const Expr *expr, /* Parse cstring and build uint32 representing the version */ static uint32 -build_sql_facade_version(char *version_cstr) +build_semver_uint32(char *version_cstr) { - uint32 version; + uint32 version = 0; + bool expect_num_token = false; + long max_dots = 
2; + char *pos = version_cstr; + + while (*pos) + { + /* Invert expected token type */ + expect_num_token = !expect_num_token; + + if (expect_num_token) + { + char *end_pos; + long num; + long i; + + /* Parse number */ + num = strtol(pos, &end_pos, 10); - /* expect to see x+.y+.z+ */ - version = strtol(version_cstr, &version_cstr, 10) & 0xFF; + if (pos == end_pos || num > 99 || num < 0) + goto version_error; - version <<= 8; - if (strlen(version_cstr) > 1) - version |= (strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + for (i = 0; i < max_dots; i++) + num *= 100; - version <<= 8; - if (strlen(version_cstr) > 1) - version |= (strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + version += num; + + /* Move position */ + pos = end_pos; + } + else + { + /* Expect to see less dots */ + max_dots--; + + if (*pos != '.' || max_dots < 0) + goto version_error; + + /* Move position */ + pos++; + } + } + + if (!expect_num_token) + goto version_error; return version; + +version_error: + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, (errmsg("wrong version: \"%s\"", version_cstr), + errhint(INIT_ERROR_HINT))); + return 0; /* keep compiler happy */ } /* Get version of pg_pathman's facade written in Pl/PgSQL */ static uint32 -get_sql_facade_version(void) +get_plpgsql_frontend_version(void) { Relation pg_extension_rel; ScanKeyData skey; @@ -1255,20 +1298,21 @@ get_sql_facade_version(void) systable_endscan(scan); heap_close(pg_extension_rel, AccessShareLock); - return build_sql_facade_version(version_cstr); + return build_semver_uint32(version_cstr); } /* Check that current Pl/PgSQL facade is compatible with internals */ static void -validate_sql_facade_version(uint32 ver) +validate_plpgsql_frontend_version(uint32 current_ver, uint32 compatible_ver) { - Assert(ver > 0); + Assert(current_ver > 0); + Assert(compatible_ver > 0); /* Compare ver to 'lowest compatible frontend' version */ - if (ver < LOWEST_COMPATIBLE_FRONT) + if (current_ver < compatible_ver) { elog(DEBUG1, "current version: %x, lowest compatible: %x", - ver, LOWEST_COMPATIBLE_FRONT); + current_ver, compatible_ver); DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index bb66506d..f1cf0000 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -77,7 +77,7 @@ PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); PG_FUNCTION_INFO_V1( debug_capture ); -PG_FUNCTION_INFO_V1( get_pathman_lib_version ); +PG_FUNCTION_INFO_V1( pathman_version ); /* User context for function show_partition_list_internal() */ @@ -1594,7 +1594,7 @@ debug_capture(PG_FUNCTION_ARGS) /* NOTE: just in case */ Datum -get_pathman_lib_version(PG_FUNCTION_ARGS) +pathman_version(PG_FUNCTION_ARGS) { - PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); + PG_RETURN_CSTRING(CURRENT_LIB_VERSION); } From e8c3674b11cdf7058dc3b01e2b3d4d5c7d7681d8 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 10 Aug 2017 18:18:40 +0300 Subject: [PATCH 114/528] Add set_trace function in python tests --- tests/python/partitioning_test.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 65892fbd..8c3a5828 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -89,6 +89,11 @@ def setUp(self): def tearDown(self): stop_all() + def set_trace(self, con, command="pg_debug"): + pid = con.execute("select pg_backend_pid()")[0][0] + p = 
subprocess.Popen([command], stdin=subprocess.PIPE) + p.communicate(str(pid).encode()) + def start_new_pathman_cluster(self, name='test', allows_streaming=False): node = get_new_node(name) node.init(allows_streaming=allows_streaming) From 962efa2f48ffa2b66f09802cb66950a0853a8b8a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 10 Aug 2017 18:25:34 +0300 Subject: [PATCH 115/528] remove duplicate debug printing functions --- src/debug_print.c | 148 ------------------------------------------ tests/cmocka/Makefile | 1 - 2 files changed, 149 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index 1a4ea417..bac1d622 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -104,151 +104,3 @@ irange_print(IndexRange irange) return str.data; } - -#ifndef CMOCKA_TESTS -/* - * Print attribute information - */ -static char * -printatt(unsigned attributeId, - Form_pg_attribute attributeP, - char *value) -{ - return psprintf("\t%2d: %s%s%s%s\t(typeid = %u, len = %d, typmod = %d, byval = %c)\n", - attributeId, - NameStr(attributeP->attname), - value != NULL ? " = \"" : "", - value != NULL ? value : "", - value != NULL ? "\"" : "", - (unsigned int) (attributeP->atttypid), - attributeP->attlen, - attributeP->atttypmod, - attributeP->attbyval ? 't' : 'f'); -} - -/* - * Print one tuple for an interactive backend - */ -static char * -debugtup(TupleTableSlot *slot) -{ - TupleDesc typeinfo = slot->tts_tupleDescriptor; - int natts = typeinfo->natts; - int i; - Datum attr; - char *value; - bool isnull; - Oid typoutput; - bool typisvarlena; - - int result_len = 0; - char *result = (char *) palloc(result_len + 1); - - for (i = 0; i < natts; ++i) - { - char *s; - int len; - - attr = slot_getattr(slot, i + 1, &isnull); - if (isnull) - continue; - getTypeOutputInfo(typeinfo->attrs[i]->atttypid, - &typoutput, &typisvarlena); - - value = OidOutputFunctionCall(typoutput, attr); - - s = printatt((unsigned) i + 1, typeinfo->attrs[i], value); - len = strlen(s); - result = (char *) repalloc(result, result_len + len + 1); - strncpy(result + result_len, s, len); - result_len += len; - } - - result[result_len] = '\0'; - return result; -} - -/* - * Print contents of tuple slot - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -slot_print(TupleTableSlot *slot) -{ - if (TupIsNull(slot)) - return NULL; - - if (!slot->tts_tupleDescriptor) - return NULL; - - return debugtup(slot); -} - -/* - * Print contents of range table - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -rt_print(const List *rtable) -{ -#define APPEND_STR(si, ...) 
\ -{ \ - char *line = psprintf(__VA_ARGS__); \ - appendStringInfo(&si, "%s", line); \ - pfree(line); \ -} - - const ListCell *l; - int i = 1; - - StringInfoData str; - - initStringInfo(&str); - APPEND_STR(str, "resno\trefname \trelid\tinFromCl\n"); - APPEND_STR(str, "-----\t---------\t-----\t--------\n"); - - foreach(l, rtable) - { - RangeTblEntry *rte = lfirst(l); - - switch (rte->rtekind) - { - case RTE_RELATION: - APPEND_STR(str, "%d\t%s\t%u\t%c", - i, rte->eref->aliasname, rte->relid, rte->relkind); - break; - case RTE_SUBQUERY: - APPEND_STR(str, "%d\t%s\t[subquery]", - i, rte->eref->aliasname); - break; - case RTE_JOIN: - APPEND_STR(str, "%d\t%s\t[join]", - i, rte->eref->aliasname); - break; - case RTE_FUNCTION: - APPEND_STR(str, "%d\t%s\t[rangefunction]", i, rte->eref->aliasname); - break; - case RTE_VALUES: - APPEND_STR(str, "%d\t%s\t[values list]", i, rte->eref->aliasname); - break; - case RTE_CTE: - APPEND_STR(str, "%d\t%s\t[cte]", i, rte->eref->aliasname); - break; - default: - elog(ERROR, "%d\t%s\t[unknown rtekind]", - i, rte->eref->aliasname); - } - - APPEND_STR(str, "\t%s\t%s\n", (rte->inh ? "inh" : ""), - (rte->inFromCl ? "inFromCl" : "")); - - i++; - } - return str.data; -#undef APPEND_STR -} -#endif diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index f79e2637..e31e6d95 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,7 +8,6 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) -CFLAGS += -DCMOCKA_TESTS LDFLAGS += -lcmocka TEST_BIN = rangeset_tests From 8bc0be32109377f2a75e590cab8378024d0b543d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 10 Aug 2017 18:52:06 +0300 Subject: [PATCH 116/528] remove useless variable 'v_upper_parent' --- hash.sql | 3 --- range.sql | 2 -- 2 files changed, 5 deletions(-) diff --git a/hash.sql b/hash.sql index 5159e189..8cf9b19a 100644 --- a/hash.sql +++ b/hash.sql @@ -19,9 +19,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ -DECLARE - v_upper_parent REGCLASS; - BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, diff --git a/range.sql b/range.sql index 0fd287e7..67cf3d7a 100644 --- a/range.sql +++ b/range.sql @@ -65,7 +65,6 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; - v_upper_parent REGCLASS; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, @@ -165,7 +164,6 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; - v_upper_parent REGCLASS; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, From bff27f1b2048fb96e48d7f1a40e02dd72430f61b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 11 Aug 2017 13:21:08 +0300 Subject: [PATCH 117/528] fix regression tests --- expected/pathman_basic.out | 4 ++-- expected/pathman_expressions.out | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 0a46ba3e..06551aa1 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -18,7 +18,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function 
pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM \set VERBOSITY terse ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); @@ -147,7 +147,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM \set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 463ad584..c0f4b0e9 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -234,7 +234,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using system attributes */ SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); ERROR: failed to analyze partitioning expression "xmin" @@ -244,7 +244,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using subqueries */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value, (select oid from pg_class limit 1)', @@ -256,7 +256,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); ERROR: failed to analyze partitioning expression "random()" @@ -266,7 +266,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using broken parentheses */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); ERROR: failed to parse partitioning expression "value * value2))" @@ -276,7 +276,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function 
create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using missing columns */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); ERROR: failed to analyze partitioning expression "value * value3" @@ -287,7 +287,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; @@ -371,7 +371,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); @@ -382,7 +382,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; From 5d955368a30470fb2fa8ce9de479e0e94fd92198 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 11 Aug 2017 17:26:34 +0300 Subject: [PATCH 118/528] code cleanup --- src/include/partition_filter.h | 2 +- src/include/relation_info.h | 5 ++-- src/partition_filter.c | 44 +++++++++++++++++++--------------- src/relation_info.c | 11 +++------ 4 files changed, 31 insertions(+), 31 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index ddb5b72f..2a4ca382 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -74,7 +74,7 @@ struct ResultPartsStorage EState *estate; /* pointer to executor's state */ - CmdType command_type; /* currenly we only allow INSERT */ + CmdType command_type; /* INSERT | UPDATE */ LOCKMODE head_open_lock_mode; LOCKMODE heap_close_lock_mode; }; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 2a22bf91..9921a029 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -393,9 +393,8 @@ extern bool pg_pathman_enable_bounds_cache; void init_relation_info_static_data(void); -AttrNumber * build_attributes_map(const PartRelationInfo *prel, - Relation child_rel, - int *map_length); +AttrNumber *build_attributes_map(const PartRelationInfo *prel, + TupleDesc child_tupdesc); #endif /* RELATION_INFO_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 503bffe5..17490bb6 100644 
--- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -169,6 +169,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; + Assert(cmd_type == CMD_INSERT || cmd_type == CMD_UPDATE); parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; @@ -582,15 +583,16 @@ partition_filter_create_scan_state(CustomScan *node) state = (PartitionFilterState *) palloc0(sizeof(PartitionFilterState)); NodeSetTag(state, T_CustomScanState); - state->css.flags = node->flags; - state->css.methods = &partition_filter_exec_methods; + /* Initialize base CustomScanState */ + state->css.flags = node->flags; + state->css.methods = &partition_filter_exec_methods; /* Extract necessary variables */ - state->subplan = (Plan *) linitial(node->custom_plans); - state->partitioned_table = intVal(linitial(node->custom_private)); - state->on_conflict_action = intVal(lsecond(node->custom_private)); - state->returning_list = lthird(node->custom_private); - state->command_type = (CmdType) intVal(lfourth(node->custom_private)); + state->subplan = (Plan *) linitial(node->custom_plans); + state->partitioned_table = (Oid) intVal(linitial(node->custom_private)); + state->on_conflict_action = intVal(lsecond(node->custom_private)); + state->returning_list = (List *) lthird(node->custom_private); + state->command_type = (CmdType) intVal(lfourth(node->custom_private)); /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -619,7 +621,6 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); - Assert(prel != NULL); /* Prepare state for expression execution */ if (state->command_type == CMD_UPDATE) @@ -630,20 +631,25 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) * parent varattnos to child varattnos */ - int natts; bool found_whole_row; - AttrNumber *attr_map; + AttrNumber *map; MemoryContext old_mcxt; Index relno = ((Scan *) child_state->plan)->scanrelid; - Node *expr = PrelExpressionForRelid(prel, relno); - Relation child_rel = heap_open( - getrelid(relno, estate->es_range_table), NoLock); - - attr_map = build_attributes_map(prel, child_rel, &natts); - expr = map_variable_attnos(expr, relno, 0, attr_map, natts, - &found_whole_row); - Assert(!found_whole_row); + Node *expr; + Relation child_rel; + + child_rel = heap_open(getrelid(relno, estate->es_range_table), NoLock); + + map = build_attributes_map(prel, RelationGetDescr(child_rel)); + expr = map_variable_attnos(PrelExpressionForRelid(prel, relno), + relno, 0, map, + RelationGetDescr(child_rel)->natts, + &found_whole_row); + + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference found in partition key"); + heap_close(child_rel, NoLock); /* Prepare state for expression execution */ @@ -653,7 +659,7 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) } else { - /* simple INSERT, expression based on parent attribute numbers */ + /* Simple INSERT, expression based on parent attribute numbers */ state->expr_state = prepare_expr_state(prel, estate); } } diff --git a/src/relation_info.c b/src/relation_info.c index ffcff1c2..eb8b0980 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1447,18 +1447,13 @@ shout_if_prel_is_invalid(const Oid parent_oid, * And it should be 
faster if expression uses not all fields from relation. */ AttrNumber * -build_attributes_map(const PartRelationInfo *prel, Relation child_rel, - int *map_length) +build_attributes_map(const PartRelationInfo *prel, TupleDesc child_tupdesc) { AttrNumber i = -1; Oid parent_relid = PrelParentRelid(prel); - TupleDesc child_descr = RelationGetDescr(child_rel); - int natts = child_descr->natts; + int natts = child_tupdesc->natts; AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); - if (map_length != NULL) - *map_length = natts; - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { int j; @@ -1467,7 +1462,7 @@ build_attributes_map(const PartRelationInfo *prel, Relation child_rel, for (j = 0; j < natts; j++) { - Form_pg_attribute att = child_descr->attrs[j]; + Form_pg_attribute att = child_tupdesc->attrs[j]; if (att->attisdropped) continue; /* attrMap[attnum - 1] is already 0 */ From 7df9159d851c494fe0609cf0d431d0d07522cf1f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Aug 2017 15:00:23 +0300 Subject: [PATCH 119/528] resolve conflicts, fix make_partition_update() --- expected/pathman_inserts.out | 12 ++++++------ expected/pathman_inserts_1.out | 24 ++++++++++++------------ src/include/partition_update.h | 1 + src/partition_update.c | 20 +++++++++++++++----- src/planner_tree_modification.c | 4 ++-- 5 files changed, 36 insertions(+), 25 deletions(-) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index c3a8566f..15136608 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -872,11 +872,11 @@ RETURNING e * 2, b, tableoid::regclass; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (d, e) SELECT i, i FROM generate_series(1, 10) i; - QUERY PLAN -------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e -> Function Scan on pg_catalog.generate_series i Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i Function Call: generate_series(1, 10) @@ -889,7 +889,7 @@ FROM generate_series(1, 10) i; ----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Function Scan on pg_catalog.generate_series i Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint Function Call: generate_series(1, 10) @@ -943,7 +943,7 @@ FROM test_inserts.storage; ---------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint -> Result Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint -> Append @@ -984,7 +984,7 @@ FROM test_inserts.storage; -------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: 
NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Result Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint -> Append diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index 9f8633ab..d20e2c3a 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -872,11 +872,11 @@ RETURNING e * 2, b, tableoid::regclass; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (d, e) SELECT i, i FROM generate_series(1, 10) i; - QUERY PLAN -------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e -> Function Scan on pg_catalog.generate_series i Output: NULL::integer, NULL::integer, NULL::integer, i, i Function Call: generate_series(1, 10) @@ -885,11 +885,11 @@ FROM generate_series(1, 10) i; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b) SELECT i FROM generate_series(1, 10) i; - QUERY PLAN ---------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Function Scan on pg_catalog.generate_series i Output: NULL::integer, i, NULL::integer, NULL::text, NULL::bigint Function Call: generate_series(1, 10) @@ -939,11 +939,11 @@ FROM test_inserts.storage; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b, d) SELECT b, d FROM test_inserts.storage; - QUERY PLAN -------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint -> Result Output: NULL::integer, b, NULL::integer, d, NULL::bigint -> Append @@ -980,11 +980,11 @@ FROM test_inserts.storage; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b) SELECT b FROM test_inserts.storage; - QUERY PLAN ---------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Result Output: NULL::integer, b, NULL::integer, NULL::text, NULL::bigint -> Append diff --git a/src/include/partition_update.h b/src/include/partition_update.h index c2bd6926..7efdfe51 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -51,6 +51,7 @@ TupleTableSlot *partition_update_exec(CustomScanState *node); Plan *make_partition_update(Plan *subplan, Oid parent_relid, + Index parent_rti, List *returning_list); #endif /* PARTITION_UPDATE_H */ diff --git a/src/partition_update.c b/src/partition_update.c index 93d44851..f89edcdd 
100644
--- a/src/partition_update.c
+++ b/src/partition_update.c
@@ -63,11 +63,20 @@ init_partition_update_static_data(void)
 
 Plan *
 make_partition_update(Plan *subplan,
 					  Oid parent_relid,
+					  Index parent_rti,
 					  List *returning_list)
 {
-	Plan	   *pfilter;
-	CustomScan *cscan = makeNode(CustomScan);
+	CustomScan *cscan = makeNode(CustomScan);
+	Plan	   *pfilter;
+
+	/* Create child PartitionFilter node */
+	pfilter = make_partition_filter(subplan,
+									parent_relid,
+									parent_rti,
+									ONCONFLICT_NONE,
+									returning_list,
+									CMD_UPDATE);
 
 	/* Copy costs etc */
 	cscan->scan.plan.startup_cost = subplan->startup_cost;
@@ -77,15 +86,16 @@ make_partition_update(Plan *subplan,
 
 	/* Setup methods and child plan */
 	cscan->methods = &partition_update_plan_methods;
-	pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE,
-									returning_list, CMD_UPDATE);
 	cscan->custom_plans = list_make1(pfilter);
+
+	/* Build an appropriate target list */
 	cscan->scan.plan.targetlist = pfilter->targetlist;
 
 	/* No physical relation will be scanned */
 	cscan->scan.scanrelid = 0;
+
+	/* FIXME: should we use the same tlist? */
 	cscan->custom_scan_tlist = subplan->targetlist;
-	cscan->custom_private = NULL;
 
 	return &cscan->scan.plan;
 }
diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index 293ce941..9af8c302 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -557,8 +557,8 @@ partition_update_visitor(Plan *plan, void *context)
 				lc3 = lnext(lc3);
 			}
 
-			lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1),
-												relid,
+			lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid,
+												modify_table->nominalRelation,
 												returning_list);
 		}
 	}

From 1e0af6f3b9bb5598f6559f56d2085c170deb6b09 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 21 Aug 2017 16:32:24 +0300
Subject: [PATCH 120/528] test variants in 'expected' using a bash script

---
 expected/test_variants.sh | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100755 expected/test_variants.sh

diff --git a/expected/test_variants.sh b/expected/test_variants.sh
new file mode 100755
index 00000000..46bf2817
--- /dev/null
+++ b/expected/test_variants.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/bash
+
+ret=0
+
+red="\033[0;31m"
+reset='\033[0m'
+
+shopt -s extglob
+
+for result in ./*_+([0-9]).out; do
+    f1="$result"
+    f2="${f1//_+([0-9])/}"
+
+    printf "examine $(basename $f1) \n"
+
+    file_diff=$(diff $f1 $f2 | wc -l)
+
+    if [ $file_diff -eq 0 ]; then
+        printf $red
+        printf "WARNING: $(basename $f1) is redundant \n" >&2
+        printf $reset
+
+        ret=1 # change exit code
+    fi
+done
+
+exit $ret

From 75c0c77409214cde583564c28ba3b8cc45aae92f Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Wed, 23 Aug 2017 16:10:33 +0300
Subject: [PATCH 121/528] rename update node to PartitionRouter

---
 Makefile                                      |   2 +-
 expected/pathman_subpartitions.out            |   2 +-
 expected/pathman_update_node.out              |   6 +-
 sql/pathman_subpartitions.sql                 |   8 +-
 sql/pathman_update_node.sql                   |   4 +-
 src/hooks.c                                   |  41 ++--
 src/include/hooks.h                           |   6 +-
 src/include/partition_filter.h                |  33 ++-
 src/include/partition_router.h                |  85 +++++++
 src/include/partition_update.h                |  57 -----
 src/include/planner_tree_modification.h       |   2 +-
 src/partition_filter.c                        |  47 ++--
 ...{partition_update.c => partition_router.c} | 128 +++++-----
 src/pg_pathman.c                              |   4 +-
 src/planner_tree_modification.c               | 229 +++++++++---------
 src/utility_stmt_hooking.c                    |  22 +-
 16 files changed, 375 insertions(+), 301 deletions(-)
 create mode 100644 src/include/partition_router.h
 delete mode 100644
src/include/partition_update.h rename src/{partition_update.c => partition_router.c} (66%) diff --git a/Makefile b/Makefile index 1b159d23..3bd96b31 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/rowmarks_fix.o \ - src/partition_update.o $(WIN32RES) + src/partition_router.o $(WIN32RES) override PG_CPPFLAGS += -I$(CURDIR)/src/include diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index af35011e..ab93090d 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -196,7 +196,7 @@ SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects /* Test that update works correctly */ -SET pg_pathman.enable_partitionupdate=on; +SET pg_pathman.enable_partitionrouter = ON; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 254b301e..125eedd4 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -2,7 +2,7 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_update_node; -SET pg_pathman.enable_partitionupdate=on; +SET pg_pathman.enable_partitionrouter = ON; /* Partition table by RANGE (NUMERIC) */ CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); CREATE INDEX val_idx ON test_update_node.test_range (val); @@ -18,7 +18,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRoute) + -> Custom Scan (PartitionRouter) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) @@ -31,7 +31,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRoute) + -> Custom Scan (PartitionRouter) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 23217872..1e5b2e47 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -3,6 +3,8 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; + + /* Create two level partitioning structure */ CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; @@ -58,8 +60,10 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; + /* Test that update works correctly */ -SET pg_pathman.enable_partitionupdate=on; +SET pg_pathman.enable_partitionrouter = ON; + CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); SELECT create_range_partitions('subpartitions.abc_1', 
'b', 0, 50, 2); @@ -77,6 +81,8 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ + + DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index f451010e..aff7f8ec 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -3,7 +3,9 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_update_node; -SET pg_pathman.enable_partitionupdate=on; + + +SET pg_pathman.enable_partitionrouter = ON; /* Partition table by RANGE (NUMERIC) */ diff --git a/src/hooks.c b/src/hooks.c index 63808297..7f77514a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -17,7 +17,7 @@ #include "hooks.h" #include "init.h" #include "partition_filter.h" -#include "partition_update.h" +#include "partition_router.h" #include "pathman_workers.h" #include "planner_tree_modification.h" #include "runtimeappend.h" @@ -559,8 +559,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Add PartitionUpdate node for UPDATE queries */ - ExecuteForPlanTree(result, add_partition_update_nodes); + /* Add PartitionRouter node for UPDATE queries */ + ExecuteForPlanTree(result, add_partition_routers); /* Decrement relation tags refcount */ decr_refcount_relation_tags(); @@ -847,41 +847,45 @@ pathman_process_utility_hook(Node *first_arg, #define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c)) #endif +/* + * Executor hook (for PartitionRouter). + */ #if PG_VERSION_NUM >= 100000 void -pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, - ExecutorRun_CountArgType count, bool execute_once) +pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count, + bool execute_once) #else void -pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, - ExecutorRun_CountArgType count) +pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count) #endif { PlanState *state = (PlanState *) queryDesc->planstate; if (IsA(state, ModifyTableState)) { - int i; ModifyTableState *mt_state = (ModifyTableState *) state; + int i; for (i = 0; i < mt_state->mt_nplans; i++) { - CustomScanState *subplanstate = (CustomScanState *) mt_state->mt_plans[i]; + CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; - if (!IsA(subplanstate, CustomScanState)) - continue; - - if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) + /* Check if this is a PartitionRouter node */ + if (IsPartitionRouterState(pr_state)) { - ResultRelInfo *rri = mt_state->resultRelInfo + i; + ResultRelInfo *rri = &mt_state->resultRelInfo[i]; /* - * We unset junkfilter to disable junk cleaning in - * ExecModifyTable. + * We unset junkfilter to disable junk + * cleaning in ExecModifyTable. 
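+				 * (Presumably because PartitionRouter still needs the junk
+				 * attributes downstream: it keeps its own JunkFilter in
+				 * PartitionRouterState, declared in partition_router.h below.)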
*/ rri->ri_junkFilter = NULL; - /* hack, change UPDATE operation to INSERT */ + /* HACK: change UPDATE operation to INSERT */ mt_state->operation = CMD_INSERT; } } @@ -891,6 +895,5 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, if (executor_run_hook_next) EXECUTOR_HOOK_NEXT(queryDesc, direction, count); /* Else call internal implementation */ - else - EXECUTOR_RUN(queryDesc, direction, count); + else EXECUTOR_RUN(queryDesc, direction, count); } diff --git a/src/include/hooks.h b/src/include/hooks.h index d512436d..0c9922f7 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -79,11 +79,13 @@ typedef long ExecutorRun_CountArgType; #endif #if PG_VERSION_NUM >= 100000 -void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, ExecutorRun_CountArgType count, bool execute_once); #else -void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, ExecutorRun_CountArgType count); #endif diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index c2626e46..841cd0cb 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -25,10 +25,13 @@ #endif +#define INSERT_NODE_NAME "PartitionFilter" + + #define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" #define ERR_PART_ATTR_MULTIPLE_RESULTS "partitioning expression should return single value" #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" -#define ERR_PART_ATTR_MULTIPLE "PartitionFilter selected more than one partition" +#define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" #define ERR_PART_DESC_CONVERT "could not convert row type for partition" @@ -46,6 +49,9 @@ typedef struct } ResultRelInfoHolder; +/* Standard size of ResultPartsStorage entry */ +#define ResultPartsStorageStandard 0 + /* Forward declaration (for on_new_rri_holder()) */ struct ResultPartsStorage; typedef struct ResultPartsStorage ResultPartsStorage; @@ -63,7 +69,8 @@ typedef void (*on_new_rri_holder)(EState *estate, */ struct ResultPartsStorage { - ResultRelInfo *saved_rel_info; /* original ResultRelInfo (parent) */ + ResultRelInfo *base_rri; /* original ResultRelInfo (parent) */ + HTAB *result_rels_table; HASHCTL result_rels_table_config; @@ -79,11 +86,6 @@ struct ResultPartsStorage LOCKMODE heap_close_lock_mode; }; -/* - * Standard size of ResultPartsStorage entry. 
- */ -#define ResultPartsStorageStandard 0 - typedef struct { CustomScanState css; @@ -115,6 +117,23 @@ extern CustomScanMethods partition_filter_plan_methods; extern CustomExecMethods partition_filter_exec_methods; +#define IsPartitionFilterPlan(node) \ + ( \ + IsA((node), CustomScan) && \ + (((CustomScan *) (node))->methods == &partition_filter_plan_methods) \ + ) + +#define IsPartitionFilterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == &partition_filter_exec_methods) \ + ) + +#define IsPartitionFilter(node) \ + ( IsPartitionFilterPlan(node) || IsPartitionFilterState(node) ) + + + void init_partition_filter_static_data(void); diff --git a/src/include/partition_router.h b/src/include/partition_router.h new file mode 100644 index 00000000..e90893ba --- /dev/null +++ b/src/include/partition_router.h @@ -0,0 +1,85 @@ +/* ------------------------------------------------------------------------ + * + * partition_update.h + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_UPDATE_H +#define PARTITION_UPDATE_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define UPDATE_NODE_NAME "PartitionRouter" + + +typedef struct PartitionRouterState +{ + CustomScanState css; + + Oid partitioned_table; + JunkFilter *junkfilter; + Plan *subplan; /* proxy variable to store subplan */ +} PartitionRouterState; + + +extern bool pg_pathman_enable_partition_router; + +extern CustomScanMethods partition_router_plan_methods; +extern CustomExecMethods partition_router_exec_methods; + + +#define IsPartitionRouterPlan(node) \ + ( \ + IsA((node), CustomScan) && \ + (((CustomScan *) (node))->methods == &partition_router_plan_methods) \ + ) + +#define IsPartitionRouterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == &partition_router_exec_methods) \ + ) + +#define IsPartitionRouter(node) \ + ( IsPartitionRouterPlan(node) || IsPartitionRouterState(node) ) + + +void init_partition_router_static_data(void); + + +Plan *make_partition_router(Plan *subplan, + Oid parent_relid, + Index parent_rti, + List *returning_list); + + +Node *partition_router_create_scan_state(CustomScan *node); + +void partition_router_begin(CustomScanState *node, EState *estate, int eflags); + +TupleTableSlot *partition_router_exec(CustomScanState *node); + +void partition_router_end(CustomScanState *node); + +void partition_router_rescan(CustomScanState *node); + +void partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + + +#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/partition_update.h b/src/include/partition_update.h deleted file mode 100644 index 7efdfe51..00000000 --- a/src/include/partition_update.h +++ /dev/null @@ -1,57 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * partition_update.h - * Insert row to right partition in UPDATE operation - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef PARTITION_UPDATE_H -#define PARTITION_UPDATE_H - -#include "relation_info.h" -#include "utils.h" - -#include "postgres.h" -#include 
"commands/explain.h" -#include "optimizer/planner.h" - -#if PG_VERSION_NUM >= 90600 -#include "nodes/extensible.h" -#endif - -#define UPDATE_NODE_DESCRIPTION ("PartitionRoute") - -typedef struct PartitionUpdateState -{ - CustomScanState css; - - Oid partitioned_table; - JunkFilter *junkfilter; - Plan *subplan; /* proxy variable to store subplan */ -} PartitionUpdateState; - -extern bool pg_pathman_enable_partition_update; - -extern CustomScanMethods partition_update_plan_methods; -extern CustomExecMethods partition_update_exec_methods; - -void init_partition_update_static_data(void); -Node *partition_update_create_scan_state(CustomScan *node); - -void partition_update_begin(CustomScanState *node, EState *estate, int eflags); -void partition_update_end(CustomScanState *node); -void partition_update_rescan(CustomScanState *node); -void partition_update_explain(CustomScanState *node, List *ancestors, - ExplainState *es); - -TupleTableSlot *partition_update_exec(CustomScanState *node); - -Plan *make_partition_update(Plan *subplan, - Oid parent_relid, - Index parent_rti, - List *returning_list); - -#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 64053b3d..f7de3e3e 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -34,7 +34,7 @@ void pathman_transform_query(Query *parse, ParamListInfo params); /* These functions scribble on Plan tree */ void add_partition_filters(List *rtable, Plan *plan); -void add_partition_update_nodes(List *rtable, Plan *plan); +void add_partition_routers(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 77a62961..46ee75e9 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -96,10 +96,10 @@ static estate_mod_data * fetch_estate_mod_data(EState *estate); void init_partition_filter_static_data(void) { - partition_filter_plan_methods.CustomName = "PartitionFilter"; + partition_filter_plan_methods.CustomName = INSERT_NODE_NAME; partition_filter_plan_methods.CreateCustomScanState = partition_filter_create_scan_state; - partition_filter_exec_methods.CustomName = "PartitionFilter"; + partition_filter_exec_methods.CustomName = INSERT_NODE_NAME; partition_filter_exec_methods.BeginCustomScan = partition_filter_begin; partition_filter_exec_methods.ExecCustomScan = partition_filter_exec; partition_filter_exec_methods.EndCustomScan = partition_filter_end; @@ -109,7 +109,7 @@ init_partition_filter_static_data(void) partition_filter_exec_methods.ExplainCustomScan = partition_filter_explain; DefineCustomBoolVariable("pg_pathman.enable_partitionfilter", - "Enables the planner's use of PartitionFilter custom node.", + "Enables the planner's use of " INSERT_NODE_NAME " custom node.", NULL, &pg_pathman_enable_partition_filter, true, @@ -164,7 +164,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, result_rels_table_config, HASH_ELEM | HASH_BLOBS); parts_storage->estate = estate; - parts_storage->saved_rel_info = NULL; + parts_storage->base_rri = NULL; parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; @@ -216,7 +216,7 @@ ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) { #define CopyToResultRelInfo(field_name) \ - ( child_result_rel_info->field_name = parts_storage->saved_rel_info->field_name ) + ( 
child_result_rel_info->field_name = parts_storage->base_rri->field_name ) ResultRelInfoHolder *rri_holder; bool found; @@ -229,13 +229,17 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (!found) { Relation child_rel, - base_rel = parts_storage->saved_rel_info->ri_RelationDesc; + base_rel; RangeTblEntry *child_rte, *parent_rte; Index child_rte_idx; ResultRelInfo *child_result_rel_info; List *translated_vars; + /* Check that 'base_rri' is set */ + if (!parts_storage->base_rri) + elog(ERROR, "ResultPartsStorage contains no base_rri"); + /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) @@ -249,10 +253,13 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) return NULL; } - parent_rte = rt_fetch(parts_storage->saved_rel_info->ri_RangeTableIndex, + parent_rte = rt_fetch(parts_storage->base_rri->ri_RangeTableIndex, parts_storage->estate->es_range_table); - /* Open relation and check if it is a valid target */ + /* Get base relation */ + base_rel = parts_storage->base_rri->ri_RelationDesc; + + /* Open child relation and check if it is a valid target */ child_rel = heap_open(partid, NoLock); CheckValidResultRel(child_rel, parts_storage->command_type); @@ -281,10 +288,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Create ResultRelInfo for partition */ child_result_rel_info = makeNode(ResultRelInfo); - /* Check that 'saved_rel_info' is set */ - if (!parts_storage->saved_rel_info) - elog(ERROR, "ResultPartsStorage contains no saved_rel_info"); - InitResultRelInfoCompat(child_result_rel_info, child_rel, child_rte_idx, @@ -550,10 +553,10 @@ make_partition_filter(Plan *subplan, errmsg("ON CONFLICT clause is not supported with partitioned tables"))); /* Copy costs etc */ - cscan->scan.plan.startup_cost = subplan->startup_cost; - cscan->scan.plan.total_cost = subplan->total_cost; - cscan->scan.plan.plan_rows = subplan->plan_rows; - cscan->scan.plan.plan_width = subplan->plan_width; + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; /* Setup methods and child plan */ cscan->methods = &partition_filter_plan_methods; @@ -689,15 +692,13 @@ partition_filter_exec(CustomScanState *node) EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; - ResultRelInfo *saved_resultRelInfo; slot = ExecProcNode(child_ps); state->subplan_slot = slot; - /* Save original ResultRelInfo */ - saved_resultRelInfo = estate->es_result_relation_info; - if (!state->result_parts.saved_rel_info) - state->result_parts.saved_rel_info = saved_resultRelInfo; + /* Don't forget to initialize 'base_rri'! 
*/
+	if (!state->result_parts.base_rri)
+		state->result_parts.base_rri = estate->es_result_relation_info;
 
 	if (state->tup_convert_slot)
 		ExecClearTuple(state->tup_convert_slot);
@@ -715,7 +716,7 @@ partition_filter_exec(CustomScanState *node)
 	{
 		if (!state->warning_triggered)
 			elog(WARNING, "table \"%s\" is not partitioned, "
-						  "PartitionFilter will behave as a normal INSERT",
+						  INSERT_NODE_NAME " will behave as a normal INSERT",
 				 get_rel_name_or_relid(state->partitioned_table));
 
 		return slot;
@@ -895,7 +896,7 @@ prepare_rri_returning_for_insert(EState *estate,
 		return;
 
 	child_rri = rri_holder->result_rel_info;
-	parent_rri = rps_storage->saved_rel_info;
+	parent_rri = rps_storage->base_rri;
 	parent_rt_idx = parent_rri->ri_RangeTableIndex;
 
 	/* Create ExprContext for tuple projections */
diff --git a/src/partition_update.c b/src/partition_router.c
similarity index 66%
rename from src/partition_update.c
rename to src/partition_router.c
index f89edcdd..84a98668 100644
--- a/src/partition_update.c
+++ b/src/partition_router.c
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------
  *
- * partition_update.c
- *		Insert row to right partition in UPDATE operation
+ * partition_router.c
+ *		Route row to the right partition in UPDATE operation
  *
  * Copyright (c) 2017, Postgres Professional
  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
@@ -11,7 +11,7 @@
  */
 
 #include "partition_filter.h"
-#include "partition_update.h"
+#include "partition_router.h"
 #include "compat/pg_compat.h"
 
 #include "access/xact.h"
@@ -23,10 +23,10 @@
 #include "utils/guc.h"
 #include "utils/rel.h"
 
-bool				pg_pathman_enable_partition_update = true;
+bool				pg_pathman_enable_partition_router = true;
 
-CustomScanMethods	partition_update_plan_methods;
-CustomExecMethods	partition_update_exec_methods;
+CustomScanMethods	partition_router_plan_methods;
+CustomExecMethods	partition_router_exec_methods;
 
 static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid,
@@ -34,24 +34,24 @@ static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid,
 										  TupleTableSlot *planSlot,
 										  EPQState *epqstate,
 										  EState *estate);
 
 void
-init_partition_update_static_data(void)
+init_partition_router_static_data(void)
 {
-	partition_update_plan_methods.CustomName = UPDATE_NODE_DESCRIPTION;
-	partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state;
-
-	partition_update_exec_methods.CustomName = UPDATE_NODE_DESCRIPTION;
-	partition_update_exec_methods.BeginCustomScan = partition_update_begin;
-	partition_update_exec_methods.ExecCustomScan = partition_update_exec;
-	partition_update_exec_methods.EndCustomScan = partition_update_end;
-	partition_update_exec_methods.ReScanCustomScan = partition_update_rescan;
-	partition_update_exec_methods.MarkPosCustomScan = NULL;
-	partition_update_exec_methods.RestrPosCustomScan = NULL;
-	partition_update_exec_methods.ExplainCustomScan = partition_update_explain;
-
-	DefineCustomBoolVariable("pg_pathman.enable_partitionupdate",
-							 "Enables the planner's use of PartitionUpdate custom node.",
+	partition_router_plan_methods.CustomName = UPDATE_NODE_NAME;
+	partition_router_plan_methods.CreateCustomScanState = partition_router_create_scan_state;
+
+	partition_router_exec_methods.CustomName = UPDATE_NODE_NAME;
+	partition_router_exec_methods.BeginCustomScan = partition_router_begin;
+	partition_router_exec_methods.ExecCustomScan = partition_router_exec;
+	partition_router_exec_methods.EndCustomScan = partition_router_end;
+	partition_router_exec_methods.ReScanCustomScan = 
partition_router_rescan; + partition_router_exec_methods.MarkPosCustomScan = NULL; + partition_router_exec_methods.RestrPosCustomScan = NULL; + partition_router_exec_methods.ExplainCustomScan = partition_router_explain; + + DefineCustomBoolVariable("pg_pathman.enable_partitionrouter", + "Enables the planner's use of " UPDATE_NODE_NAME " custom node.", NULL, - &pg_pathman_enable_partition_update, + &pg_pathman_enable_partition_router, false, PGC_USERSET, 0, @@ -61,7 +61,7 @@ init_partition_update_static_data(void) } Plan * -make_partition_update(Plan *subplan, +make_partition_router(Plan *subplan, Oid parent_relid, Index parent_rti, List *returning_list) @@ -79,13 +79,13 @@ make_partition_update(Plan *subplan, CMD_UPDATE); /* Copy costs etc */ - cscan->scan.plan.startup_cost = subplan->startup_cost; - cscan->scan.plan.total_cost = subplan->total_cost; - cscan->scan.plan.plan_rows = subplan->plan_rows; - cscan->scan.plan.plan_width = subplan->plan_width; + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; /* Setup methods and child plan */ - cscan->methods = &partition_update_plan_methods; + cscan->methods = &partition_router_plan_methods; cscan->custom_plans = list_make1(pfilter); /* Build an appropriate target list */ @@ -101,15 +101,15 @@ make_partition_update(Plan *subplan, } Node * -partition_update_create_scan_state(CustomScan *node) +partition_router_create_scan_state(CustomScan *node) { - PartitionUpdateState *state; + PartitionRouterState *state; - state = (PartitionUpdateState *) palloc0(sizeof(PartitionUpdateState)); + state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); NodeSetTag(state, T_CustomScanState); state->css.flags = node->flags; - state->css.methods = &partition_update_exec_methods; + state->css.methods = &partition_router_exec_methods; /* Extract necessary variables */ state->subplan = (Plan *) linitial(node->custom_plans); @@ -117,30 +117,29 @@ partition_update_create_scan_state(CustomScan *node) } void -partition_update_begin(CustomScanState *node, EState *estate, int eflags) +partition_router_begin(CustomScanState *node, EState *estate, int eflags) { - PartitionUpdateState *state = (PartitionUpdateState *) node; + PartitionRouterState *state = (PartitionRouterState *) node; /* Initialize PartitionFilter child node */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); } TupleTableSlot * -partition_update_exec(CustomScanState *node) +partition_router_exec(CustomScanState *node) { EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; - PartitionUpdateState *state = (PartitionUpdateState *) node; + PartitionRouterState *state = (PartitionRouterState *) node; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) { - Datum datum; - ResultRelInfo *resultRelInfo, - *sourceRelInfo; + ResultRelInfo *result_rri, + *parent_rri; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; EPQState epqstate; @@ -152,41 +151,46 @@ partition_update_exec(CustomScanState *node) EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); - sourceRelInfo = child_state->result_parts.saved_rel_info; - resultRelInfo = estate->es_result_relation_info; + parent_rri = child_state->result_parts.base_rri; + result_rri = estate->es_result_relation_info; - /* we generate junkfilter, if it 
wasn't created before */ + /* Build new junkfilter if we have to */ if (state->junkfilter == NULL) { - state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, - sourceRelInfo->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlot(estate)); + state->junkfilter = + ExecInitJunkFilter(state->subplan->targetlist, + parent_rri->ri_RelationDesc->rd_att->tdhasoid, + ExecInitExtraTupleSlot(estate)); + + state->junkfilter->jf_junkAttNo = + ExecFindJunkAttribute(state->junkfilter, "ctid"); - state->junkfilter->jf_junkAttNo = ExecFindJunkAttribute(state->junkfilter, "ctid"); if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) elog(ERROR, "could not find junk ctid column"); } - relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; + relkind = parent_rri->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { - bool isNull; + Datum ctid_datum; + bool ctid_isnull; + + ctid_datum = ExecGetJunkAttribute(child_state->subplan_slot, + state->junkfilter->jf_junkAttNo, + &ctid_isnull); - datum = ExecGetJunkAttribute(child_state->subplan_slot, - state->junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... */ - if (isNull) + if (ctid_isnull) elog(ERROR, "ctid is NULL"); - tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ + tupleid = (ItemPointer) DatumGetPointer(ctid_datum); + tuple_ctid = *tupleid; /* be sure we don't free ctid! */ tupleid = &tuple_ctid; } else if (relkind == RELKIND_FOREIGN_TABLE) - elog(ERROR, "update node is not supported for foreign tables"); + elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); else - elog(ERROR, "got unexpected type of relation for update"); + elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); /* * Clean from junk attributes before INSERT, @@ -196,13 +200,13 @@ partition_update_exec(CustomScanState *node) slot = ExecFilterJunk(state->junkfilter, slot); /* Delete old tuple */ - estate->es_result_relation_info = sourceRelInfo; + estate->es_result_relation_info = parent_rri; Assert(tupleid != NULL); ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); - /* we've got the slot that can be inserted to child partition */ - estate->es_result_relation_info = resultRelInfo; + /* We've got the slot that can be inserted to child partition */ + estate->es_result_relation_info = result_rri; return slot; } @@ -210,21 +214,21 @@ partition_update_exec(CustomScanState *node) } void -partition_update_end(CustomScanState *node) +partition_router_end(CustomScanState *node) { Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); } void -partition_update_rescan(CustomScanState *node) +partition_router_rescan(CustomScanState *node) { Assert(list_length(node->custom_ps) == 1); ExecReScan((PlanState *) linitial(node->custom_ps)); } void -partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *es) +partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es) { /* Nothing to do here now */ } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 57dbcef3..d6bfde96 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -16,7 +16,7 @@ #include "hooks.h" #include "pathman.h" #include "partition_filter.h" -#include "partition_update.h" +#include "partition_router.h" #include "runtimeappend.h" #include "runtime_merge_append.h" @@ -321,7 +321,7 @@ _PG_init(void) init_runtimeappend_static_data(); init_runtime_merge_append_static_data(); 
init_partition_filter_static_data(); - init_partition_update_static_data(); + init_partition_router_static_data(); } /* Get cached PATHMAN_CONFIG relation Oid */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9af8c302..b0d61672 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -14,7 +14,7 @@ #include "compat/rowmarks_fix.h" #include "partition_filter.h" -#include "partition_update.h" +#include "partition_router.h" #include "planner_tree_modification.h" #include "relation_info.h" #include "rewrite/rewriteManip.h" @@ -38,20 +38,23 @@ typedef enum FP_NON_SINGULAR_RESULT /* Multiple or no partitions */ } FindPartitionResult; + static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse); static void handle_modification_query(Query *parse, ParamListInfo params); static void partition_filter_visitor(Plan *plan, void *context); -static void partition_update_visitor(Plan *plan, void *context); +static void partition_router_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); -static FindPartitionResult find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition); +static FindPartitionResult find_deepest_partition(Oid relid, Index idx, + Expr *quals, Oid *partition); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); + /* * HACK: We have to mark each Query with a unique * id in order to recognize them properly. @@ -274,10 +277,7 @@ handle_modification_query(Query *parse, ParamListInfo params) if (params && clause_contains_params((Node *) quals)) quals = (Expr *) eval_extern_params_mutator((Node *) quals, params); - /* - * Parse syntax tree and extract deepest partition (if there is only one - * satisfying quals) - */ + /* Parse syntax tree and extract deepest partition */ fp_result = find_deepest_partition(rte->relid, result_rel, quals, &child); /* @@ -342,82 +342,11 @@ handle_modification_query(Query *parse, ParamListInfo params) } } -/* - * Find a single deepest subpartition. If there are more than one partitions - * satisfies quals or no such partition at all then return InvalidOid. 
- */ -static FindPartitionResult -find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) -{ - const PartRelationInfo *prel; - Node *prel_expr; - WalkerContext context; - List *ranges; - WrapperNode *wrap; - - prel = get_pathman_relation_info(relid); - - /* Exit if it's not partitioned */ - if (!prel) - return FP_PLAIN_TABLE; - - /* Exit if we must include parent */ - if (prel->enable_parent) - return FP_NON_SINGULAR_RESULT; - - /* Exit if there's no quals (no use) */ - if (!quals) - return FP_NON_SINGULAR_RESULT; - - /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, idx); - - ranges = list_make1_irange_full(prel, IR_COMPLETE); - - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL); - wrap = walk_expr_tree(quals, &context); - ranges = irange_list_intersection(ranges, wrap->rangeset); - - if (irange_list_length(ranges) == 1) - { - IndexRange irange = linitial_irange(ranges); - - if (irange_lower(irange) == irange_upper(irange)) - { - Oid *children = PrelGetChildrenArray(prel), - child = children[irange_lower(irange)], - subpartition; - FindPartitionResult result; - - /* - * Try to go deeper and see if there is subpartition - */ - result = find_deepest_partition(child, - idx, - quals, - &subpartition); - switch(result) - { - case FP_FOUND: - *partition = subpartition; - return FP_FOUND; - case FP_PLAIN_TABLE: - *partition = child; - return FP_FOUND; - case FP_NON_SINGULAR_RESULT: - return FP_NON_SINGULAR_RESULT; - } - } - } - - return FP_NON_SINGULAR_RESULT; -} /* - * ------------------------------- - * PartitionFilter and PartitionUpdate-related stuff - * ------------------------------- + * ---------------------------------------------------- + * PartitionFilter and PartitionRouter -related stuff + * ---------------------------------------------------- */ /* Add PartitionFilter nodes to the plan tree */ @@ -428,16 +357,16 @@ add_partition_filters(List *rtable, Plan *plan) plan_tree_walker(plan, partition_filter_visitor, rtable); } -/* Add PartitionUpdate nodes to the plan tree */ +/* Add PartitionRouter nodes to the plan tree */ void -add_partition_update_nodes(List *context, Plan *plan) +add_partition_routers(List *rtable, Plan *plan) { - if (pg_pathman_enable_partition_update) - plan_tree_walker(plan, partition_update_visitor, context); + if (pg_pathman_enable_partition_router) + plan_tree_walker(plan, partition_router_visitor, rtable); } /* - * Add partition filters to ModifyTable node's children. + * Add PartitionFilters to ModifyTable node's children. * * 'context' should point to the PlannedStmt->rtable. */ @@ -484,32 +413,13 @@ partition_filter_visitor(Plan *plan, void *context) } } - -static bool -modifytable_contains_fdw(List *rtable, ModifyTable *node) -{ - ListCell *lc; - - foreach(lc, node->resultRelations) - { - Index rti = lfirst_int(lc); - RangeTblEntry *rte = rt_fetch(rti, rtable); - - if (rte->relkind == RELKIND_FOREIGN_TABLE) - return true; - } - - return false; -} - - /* - * Add partition update to ModifyTable node's children. + * Add PartitionRouter to ModifyTable node's children. * * 'context' should point to the PlannedStmt->rtable. 
*/ static void -partition_update_visitor(Plan *plan, void *context) +partition_router_visitor(Plan *plan, void *context) { List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; @@ -527,8 +437,8 @@ partition_update_visitor(Plan *plan, void *context) { ereport(NOTICE, (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), - errmsg("discovered mix of local and foreign tables," - " pg_pathman's update node will not be used"))); + errmsg("discovered mix of local and foreign tables, " + UPDATE_NODE_NAME " will be disabled"))); return; } @@ -540,7 +450,8 @@ partition_update_visitor(Plan *plan, void *context) relid = getrelid(rindex, rtable); const PartRelationInfo *prel; - while ((tmp_relid = get_parent_of_partition(relid, NULL)) != 0) + /* Find topmost parent */ + while ((tmp_relid = get_parent_of_partition(relid, NULL)) != InvalidOid) relid = tmp_relid; /* Check that table is partitioned */ @@ -557,7 +468,7 @@ partition_update_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, + lfirst(lc1) = make_partition_router((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, returning_list); } @@ -625,6 +536,102 @@ tag_extract_parenthood_status(List *relation_tag) } +/* + * -------------------------- + * Various helper functions + * -------------------------- + */ + +/* Does ModifyTable node contain any FDWs? */ +static bool +modifytable_contains_fdw(List *rtable, ModifyTable *node) +{ + ListCell *lc; + + foreach(lc, node->resultRelations) + { + Index rti = lfirst_int(lc); + RangeTblEntry *rte = rt_fetch(rti, rtable); + + if (rte->relkind == RELKIND_FOREIGN_TABLE) + return true; + } + + return false; +} + +/* + * Find a single deepest subpartition. + * Return InvalidOid if that's impossible. 
+ */ +static FindPartitionResult +find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) +{ + const PartRelationInfo *prel; + Node *prel_expr; + WalkerContext context; + List *ranges; + WrapperNode *wrap; + + prel = get_pathman_relation_info(relid); + + /* Exit if it's not partitioned */ + if (!prel) + return FP_PLAIN_TABLE; + + /* Exit if we must include parent */ + if (prel->enable_parent) + return FP_NON_SINGULAR_RESULT; + + /* Exit if there's no quals (no use) */ + if (!quals) + return FP_NON_SINGULAR_RESULT; + + /* Prepare partitioning expression */ + prel_expr = PrelExpressionForRelid(prel, idx); + + ranges = list_make1_irange_full(prel, IR_COMPLETE); + + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel_expr, prel, NULL); + wrap = walk_expr_tree(quals, &context); + ranges = irange_list_intersection(ranges, wrap->rangeset); + + if (irange_list_length(ranges) == 1) + { + IndexRange irange = linitial_irange(ranges); + + if (irange_lower(irange) == irange_upper(irange)) + { + Oid *children = PrelGetChildrenArray(prel), + child = children[irange_lower(irange)], + subpartition; + FindPartitionResult result; + + /* Try to go deeper and see if there is subpartition */ + result = find_deepest_partition(child, + idx, + quals, + &subpartition); + switch(result) + { + case FP_FOUND: + *partition = subpartition; + return FP_FOUND; + + case FP_PLAIN_TABLE: + *partition = child; + return FP_FOUND; + + case FP_NON_SINGULAR_RESULT: + return FP_NON_SINGULAR_RESULT; + } + } + } + + return FP_NON_SINGULAR_RESULT; +} + /* Replace extern param nodes with consts */ static Node * eval_extern_params_mutator(Node *node, ParamListInfo params) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index ee7468a9..05183a0b 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -551,7 +551,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, bool *nulls; ResultPartsStorage parts_storage; - ResultRelInfo *parent_result_rel; + ResultRelInfo *parent_rri; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ ExprContext *econtext; @@ -566,16 +566,16 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, tupDesc = RelationGetDescr(parent_rel); - parent_result_rel = makeNode(ResultRelInfo); - InitResultRelInfoCompat(parent_result_rel, + parent_rri = makeNode(ResultRelInfo); + InitResultRelInfoCompat(parent_rri, parent_rel, 1, /* dummy rangetable index */ 0); - ExecOpenIndices(parent_result_rel, false); + ExecOpenIndices(parent_rri, false); - estate->es_result_relations = parent_result_rel; + estate->es_result_relations = parent_rri; estate->es_num_result_relations = 1; - estate->es_result_relation_info = parent_result_rel; + estate->es_result_relation_info = parent_rri; estate->es_range_table = range_table; /* Initialize ResultPartsStorage */ @@ -583,7 +583,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ResultPartsStorageStandard, prepare_rri_for_copy, NULL, CMD_INSERT); - parts_storage.saved_rel_info = parent_result_rel; + + /* Don't forget to initialize 'base_rri'! */ + parts_storage.base_rri = parent_rri; /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlot(estate); @@ -600,7 +602,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, * such. However, executing these triggers maintains consistency with the * EACH ROW triggers that we already fire on COPY. 
*/
-	ExecBSInsertTriggers(estate, parent_result_rel);
+	ExecBSInsertTriggers(estate, parent_rri);
 
 	values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
 	nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
@@ -742,7 +744,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 	pq_endmsgread();
 
 	/* Execute AFTER STATEMENT insertion triggers (FIXME: NULL transition) */
-	ExecASInsertTriggersCompat(estate, parent_result_rel, NULL);
+	ExecASInsertTriggersCompat(estate, parent_rri, NULL);
 
 	/* Handle queued AFTER triggers */
 	AfterTriggerEndQuery(estate);
@@ -756,7 +758,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 	fini_result_parts_storage(&parts_storage, true);
 
 	/* Close parent's indices */
-	ExecCloseIndices(parent_result_rel);
+	ExecCloseIndices(parent_rri);
 
 	FreeExecutorState(estate);
 
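At this point the UPDATE machinery is complete: PartitionRouter extracts the old row's ctid from a junk attribute, deletes that row from its current partition via ExecDeleteInternal(), and hands the new version to the child PartitionFilter for insertion into the proper partition. A minimal sketch of how the node can be exercised (the table name and values below are illustrative and not taken from the patches; note that pg_pathman.enable_partitionrouter is defined with a default of off):

    CREATE TABLE test(val INT4 NOT NULL);
    SELECT create_range_partitions('test', 'val', 1, 10, 2);  -- test_1 = [1;11), test_2 = [11;21)
    INSERT INTO test VALUES (5);

    SET pg_pathman.enable_partitionrouter = on;
    UPDATE test SET val = 15 WHERE val = 5;            -- row should move from test_1 to test_2
    SELECT tableoid::regclass AS part, val FROM test;  -- expected: test_2 | 15
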
From 284ae41a1c2e010eb80a2e03db27ad87e30d059a Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Wed, 23 Aug 2017 16:18:49 +0300
Subject: [PATCH 122/528] fix some comments (PartitionRouter)

---
 src/partition_router.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/src/partition_router.c b/src/partition_router.c
index 84a98668..b719bf40 100644
--- a/src/partition_router.c
+++ b/src/partition_router.c
@@ -184,7 +184,7 @@ partition_router_exec(CustomScanState *node)
 				elog(ERROR, "ctid is NULL");
 
 			tupleid = (ItemPointer) DatumGetPointer(ctid_datum);
-			tuple_ctid = *tupleid;	/* be sure we don't free ctid! */
+			tuple_ctid = *tupleid;		/* be sure we don't free ctid! */
 			tupleid = &tuple_ctid;
 		}
 		else if (relkind == RELKIND_FOREIGN_TABLE)
@@ -194,19 +194,20 @@ partition_router_exec(CustomScanState *node)
 
 		/*
 		 * Clean from junk attributes before INSERT,
-		 * but only if slot wasn't converted in PartitionFilter
+		 * but only if slot wasn't transformed in PartitionFilter.
 		 */
 		if (TupIsNull(child_state->tup_convert_slot))
 			slot = ExecFilterJunk(state->junkfilter, slot);
 
-		/* Delete old tuple */
+		/* Magic: replace current ResultRelInfo with parent's one (DELETE) */
 		estate->es_result_relation_info = parent_rri;
 
 		Assert(tupleid != NULL);
 		ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate);
 
-		/* We've got the slot that can be inserted to child partition */
+		/* Magic: replace parent's ResultRelInfo with child's one (INSERT) */
 		estate->es_result_relation_info = result_rri;
+
 		return slot;
 	}
 
@@ -234,11 +235,13 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e
 }
 
 
-/* ----------------------------------------------------------------
- *		ExecDeleteInternal
+/*
+ * ----------------------------------------------------------------
+ *  ExecDeleteInternal
  *		Basically a copy of ExecDelete from executor/nodeModifyTable.c
  * ----------------------------------------------------------------
  */
+
 static TupleTableSlot *
 ExecDeleteInternal(ItemPointer tupleid,
 				   TupleTableSlot *planSlot,
 				   EPQState *epqstate,
 				   EState *estate)
@@ -272,7 +275,7 @@ ExecDeleteInternal(ItemPointer tupleid,
 	if (tupleid != NULL)
 	{
 		/* delete the tuple */
-ldelete:;
+ldelete:
 		result = heap_delete(resultRelationDesc, tupleid,
 							 estate->es_output_cid,
 							 estate->es_crosscheck_snapshot,
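The next patch simplifies how PartitionFilter rebuilds its partitioning expression for UPDATE queries: instead of scanning the range table for the parent's varno, it uses the fixed PART_EXPR_VARNO and takes the child relation straight from the executor's current ResultRelInfo. The build_attributes_map()/map_variable_attnos() step it preserves matters whenever a child's attribute numbers differ from the parent's; a hypothetical illustration (object names here are invented for this sketch):

    CREATE TABLE p(junk TEXT, val INT4 NOT NULL);
    ALTER TABLE p DROP COLUMN junk;                 -- 'val' keeps attnum = 2 in "p"
    SELECT create_range_partitions('p', 'val', 1, 10, 2);
    -- freshly created children carry no trace of the dropped column, so their
    -- 'val' is attnum = 1; before evaluating the partitioning expression against
    -- child tuples, parent attnos must be translated to child attnos (2 -> 1)
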
From 2c4155eb5818222ab40ac29901b8c334502b4ab8 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Wed, 23 Aug 2017 18:10:04 +0300
Subject: [PATCH 123/528] small refactoring for PrelExpressionForRelid() &
 partition_filter_begin()

---
 src/partition_filter.c | 32 +++++++++++-----------------------
 1 file changed, 9 insertions(+), 23 deletions(-)

diff --git a/src/partition_filter.c b/src/partition_filter.c
index 46ee75e9..b2004c58 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -505,21 +505,10 @@ prepare_expr_state(const PartRelationInfo *prel, EState *estate)
 {
 	ExprState	   *expr_state;
 	MemoryContext	old_mcxt;
-	Index			parent_varno = 1;
 	Node		   *expr;
-	ListCell	   *lc;
 
 	/* Change varno in Vars according to range table */
-	foreach(lc, estate->es_range_table)
-	{
-		RangeTblEntry *entry = lfirst(lc);
-
-		if (entry->relid == PrelParentRelid(prel))
-			break;
-
-		parent_varno += 1;
-	}
-	expr = PrelExpressionForRelid(prel, parent_varno);
+	expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO);
 
 	/* Prepare state for expression execution */
 	old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt);
@@ -634,32 +623,29 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags)
 	if (state->command_type == CMD_UPDATE)
 	{
 		/*
-		 * In UPDATE queries we would operate with child relation, but
-		 * expression expects varattnos like in base relation, so we map
-		 * parent varattnos to child varattnos
+		 * In UPDATE queries we would work with child relation, but
+		 * expression contains varattnos of base relation, so we map
+		 * parent varattnos to child varattnos.
 		 */
 		bool			found_whole_row;
 		AttrNumber	   *map;
-		MemoryContext	old_mcxt;
-		Index			relno = ((Scan *) child_state->plan)->scanrelid;
 		Node		   *expr;
-		Relation		child_rel;
+		ResultRelInfo  *child_rri = estate->es_result_relation_info;
+		Relation		child_rel = child_rri->ri_RelationDesc;
 
-		child_rel = heap_open(getrelid(relno, estate->es_range_table), NoLock);
+		MemoryContext	old_mcxt;
 
 		map = build_attributes_map(prel, RelationGetDescr(child_rel));
-		expr = map_variable_attnos(PrelExpressionForRelid(prel, relno),
-								   relno, 0, map,
+		expr = map_variable_attnos(PrelExpressionForRelid(prel, PART_EXPR_VARNO),
+								   PART_EXPR_VARNO, 0, map,
 								   RelationGetDescr(child_rel)->natts,
 								   &found_whole_row);
 
 		if (found_whole_row)
 			elog(ERROR, "unexpected whole-row reference found in partition key");
 
-		heap_close(child_rel, NoLock);
-
 		/* Prepare state for expression execution */
 		old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt);
 		state->expr_state = ExecInitExpr((Expr *) expr, NULL);

From 4ef0128413caaf1e038ace20c234059b42042675 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Thu, 24 Aug 2017 18:18:17 +0300
Subject: [PATCH 124/528] fix incorrect Oid for sub_prel in
 select_partition_for_insert()

---
 src/partition_filter.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/partition_filter.c b/src/partition_filter.c
index b2004c58..d4873cbe 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -478,11 +478,11 @@ select_partition_for_insert(ExprState *expr_state,
 		const PartRelationInfo *sub_prel;
 
 		/* Fetch PartRelationInfo for this partitioned relation */
-		sub_prel = get_pathman_relation_info(partition_relid);
+		sub_prel = get_pathman_relation_info(rri_holder->partid);
 
 		/* Might be a false alarm */
 		if (!sub_prel)
-			break;
+			return rri_holder;
 
 		/* Build an expression state if not yet */
 		if (!rri_holder->expr_state)

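The next patch fixes binary search over RANGE partitions for the case where the searched value points into a gap left by a dropped partition (issue #117): the old code simply asserted that a matching partition always exists (Assert(found)), which no longer holds once the sequence of ranges has holes, and non-equality conditions falling into a gap could produce wrong rangesets. A reproducer in the spirit of the pathman_gaps regression test added two patches below (the table name is illustrative):

    CREATE TABLE test(val INT8 NOT NULL);
    SELECT create_range_partitions('test', 'val', 1, 10, 3);  -- [1;11), [11;21), [21;31)
    DROP TABLE test_2;                                        -- punch a gap at [11;21)
    -- 16 falls into the gap; pruning must still work for non-eq operators:
    EXPLAIN (COSTS OFF) SELECT * FROM test WHERE val > 16;    -- should scan only test_3
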
From 81668ed0ebc092dd15d4c29b0fb44dcd07beb4a1 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 28 Aug 2017 18:54:39 +0300
Subject: [PATCH 125/528] bsearch: fix non-eq WHERE conditions pointing to gaps
 (issue #117)

---
 src/pg_pathman.c | 85 ++++++++++++++++++++++++++----------------------
 1 file changed, 46 insertions(+), 39 deletions(-)

diff --git a/src/pg_pathman.c b/src/pg_pathman.c
index 4a8c4ff5..ce74b361 100644
--- a/src/pg_pathman.c
+++ b/src/pg_pathman.c
@@ -591,22 +591,20 @@ select_range_partitions(const Datum value,
 						WrapperNode *result)		/* returned partitions */
 {
 	bool		lossy = false,
-				is_less,
-				is_greater;
-
-#ifdef USE_ASSERT_CHECKING
-	bool		found = false;
-	int			counter = 0;
-#endif
+				miss_left,		/* 'value' is less than left bound */
+				miss_right;		/* 'value' is greater than right bound */
 
 	int			startidx = 0,
 				endidx = nranges - 1,
 				cmp_min,
 				cmp_max,
-				i;
+				i = 0;
 
 	Bound		value_bound = MakeBound(value); /* convert value to Bound */
 
+#ifdef USE_ASSERT_CHECKING
+	int			counter = 0;
+#endif
 
 	/* Initial value (no missing partitions found) */
 	result->found_gap = false;
@@ -616,20 +614,20 @@ select_range_partitions(const Datum value,
 	cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[startidx].min);
 	cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[endidx].max);
 
-	if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) ||
-		(cmp_min < 0 && (strategy == BTLessEqualStrategyNumber ||
-		 strategy == BTEqualStrategyNumber)))
+	if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) ||
+		(cmp_min < 0 && (strategy == BTLessEqualStrategyNumber ||
+						 strategy == BTEqualStrategyNumber)))
 	{
 		result->rangeset = NIL;
 		return;
 	}
 
 	if (cmp_max >= 0 && (strategy == BTGreaterEqualStrategyNumber ||
 						 strategy == BTGreaterStrategyNumber ||
 						 strategy == BTEqualStrategyNumber))
 	{
 		result->rangeset = list_make1_irange(make_irange(startidx,
 														 endidx,
 														 IR_COMPLETE));
 		return;
 	}
@@ -644,7 +642,7 @@ select_range_partitions(const Datum value,
 		return;
 	}
 
-	if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) ||
+	if ((cmp_min <  0 && strategy == BTGreaterStrategyNumber) ||
 		(cmp_min <= 0 && strategy == BTGreaterEqualStrategyNumber))
 	{
 		result->rangeset = list_make1_irange(make_irange(startidx,
@@ -677,44 +675,55 @@ select_range_partitions(const Datum value,
 		cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].min);
 		cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].max);
 
-		is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber));
-		is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber));
+		/* How is 'value' located with respect to left & right bounds? */
+		miss_left	= (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber));
+		miss_right	= (cmp_max > 0 || (cmp_max == 0 && strategy != BTLessStrategyNumber));
 
-		if (!is_less && !is_greater)
+		/* Searched value is inside of partition */
+		if (!miss_left && !miss_right)
 		{
-			if (strategy == BTGreaterEqualStrategyNumber && cmp_min == 0)
+			/* 'value' == 'min' and we want everything on the right */
+			if (cmp_min == 0 && strategy == BTGreaterEqualStrategyNumber)
 				lossy = false;
-			else if (strategy == BTLessStrategyNumber && cmp_max == 0)
+			/* 'value' == 'max' and we want everything on the left */
+			else if (cmp_max == 0 && strategy == BTLessStrategyNumber)
 				lossy = false;
-			else
-				lossy = true;
+			/* We're somewhere in the middle */
+			else lossy = true;
 
-#ifdef USE_ASSERT_CHECKING
-			found = true;
-#endif
-			break;
+			break; /* just exit loop */
 		}
 
 		/* Indices have met, looks like there's no partition */
 		if (startidx >= endidx)
 		{
-			result->rangeset = NIL;
+			result->rangeset  = NIL;
 			result->found_gap = true;
-			return;
+
+			/* Return if it's "key = value" */
+			if (strategy == BTEqualStrategyNumber)
+				return;
+
+			if ((miss_left && (strategy == BTLessStrategyNumber ||
+							   strategy == BTLessEqualStrategyNumber)) ||
+				(miss_right && (strategy == BTGreaterStrategyNumber ||
+								strategy == BTGreaterEqualStrategyNumber)))
+				lossy = true;
+			else
+				lossy = false;
+
+			break; /* just exit loop */
 		}
 
-		if (is_less)
+		if (miss_left)
 			endidx = i - 1;
-		else if (is_greater)
+		else if (miss_right)
 			startidx = i + 1;
 
 		/* For debug's sake */
 		Assert(++counter < 100);
 	}
 
-	/* Should've been found by now */
-	Assert(found);
-
 	/* Filter partitions */
 	switch(strategy)
 	{
@@ -743,18 +752,16 @@ select_range_partitions(const Datum value,
 		{
 			result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY));
 			if (i < nranges - 1)
-				result->rangeset =
-						lappend_irange(result->rangeset,
-									   make_irange(i + 1,
-												   nranges - 1,
-												   IR_COMPLETE));
+				result->rangeset = lappend_irange(result->rangeset,
+												  make_irange(i + 1,
+															  nranges - 1,
+															  IR_COMPLETE));
 		}
 		else
 		{
-			result->rangeset =
-					list_make1_irange(make_irange(i,
-												  nranges - 1,
-												  IR_COMPLETE));
+			result->rangeset = list_make1_irange(make_irange(i,
+															 nranges - 1,
+															 IR_COMPLETE));
 		}
 		break;

From 7bb6af4a0f4b745116ba5eb50859f4e36e4a21d9 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 29 Aug 2017 15:39:59 +0300
Subject: [PATCH 126/528] more comments and tests for range + gap case

---
 Makefile                  |   1 +
 expected/pathman_gaps.out | 823 ++++++++++++++++++++++++++++++++++++++
 sql/pathman_gaps.sql      | 137 +++++++
 src/pg_pathman.c          |   5 +
 4 files changed, 966 insertions(+)
 create mode 100644 expected/pathman_gaps.out
 create mode 100644 sql/pathman_gaps.sql

diff --git a/Makefile b/Makefile
index c2cacaae..40738ddf 100644
--- a/Makefile
+++ b/Makefile
@@ -35,6 +35,7 @@ REGRESS = pathman_array_qual \
 	pathman_domains \
pathman_expressions \ pathman_foreign_keys \ + pathman_gaps \ pathman_inserts \ pathman_interval \ pathman_join_clause \ diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out new file mode 100644 index 00000000..a21734f0 --- /dev/null +++ b/expected/pathman_gaps.out @@ -0,0 +1,823 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val = 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) 
+ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val > 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val = 31) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val = 41) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val = 51) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on 
test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val > 51) + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +DROP SCHEMA gaps CASCADE; +NOTICE: drop cascades to 30 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql new file mode 100644 index 00000000..eb185ff2 --- /dev/null +++ b/sql/pathman_gaps.sql @@ -0,0 +1,137 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; + + + +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); +DROP TABLE gaps.test_1_2; + +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); +DROP TABLE gaps.test_2_3; + +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); +DROP TABLE gaps.test_3_4; + +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; + + + +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 
16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM 
gaps.test_4 WHERE val <= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + + + +DROP SCHEMA gaps CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index ce74b361..41090f3f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -704,6 +704,11 @@ select_range_partitions(const Datum value, if (strategy == BTEqualStrategyNumber) return; + /* + * Use current partition 'i' as a pivot that will be + * excluded by relation_excluded_by_constraints() if + * (lossy == true) & its WHERE clauses are trivial. + */ if ((miss_left && (strategy == BTLessStrategyNumber || strategy == BTLessEqualStrategyNumber)) || (miss_right && (strategy == BTGreaterStrategyNumber || From 217efab4c42a4e306399393f3b8704e98523f220 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 29 Aug 2017 16:35:52 +0300 Subject: [PATCH 127/528] undef useless defines (MSFT) --- src/planner_tree_modification.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 2b8c811c..68701b3d 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -24,10 +24,26 @@ #include "utils/syscache.h" +#ifdef SELECT +#undef SELECT +#endif + +#ifdef INSERT +#undef INSERT +#endif + +#ifdef UPDATE +#undef UPDATE +#endif + +#ifdef DELETE +#undef DELETE +#endif + + /* for assign_rel_parenthood_status() */ #define PARENTHOOD_TAG CppAsString(PARENTHOOD) - /* Build transform_query_cxt field name */ #define TRANSFORM_CONTEXT_FIELD(command_type) \ has_parent_##command_type##_query From ef72caa3fe9823602f2b3dfdf8de7b3bdcf26d2f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Aug 2017 14:15:01 +0300 Subject: [PATCH 128/528] reorder some steps in pathman_post_parse_analysis_hook() (issue #118) --- src/hooks.c | 64 ++++++++++++++++++++++--------------- src/include/xact_handling.h | 2 +- src/xact_handling.c | 20 +++++++----- 3 files changed, 51 insertions(+), 35 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index f4996e65..abe6face 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -632,39 +632,51 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) if (post_parse_analyze_hook_next) post_parse_analyze_hook_next(pstate, query); - /* Hooks can be disabled */ + /* See 
cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; - /* Finish delayed invalidation jobs */ - if (IsPathmanReady()) - finish_delayed_invalidation(); + /* We shouldn't proceed on: ... */ + if (query->commandType == CMD_UTILITY) + { + /* ... BEGIN */ + if (xact_is_transaction_stmt(query->utilityStmt)) + return; - /* - * We shouldn't proceed on: - * BEGIN - * SET [TRANSACTION] - */ - if (query->commandType == CMD_UTILITY && - (xact_is_transaction_stmt(query->utilityStmt) || - xact_is_set_stmt(query->utilityStmt))) - return; + /* ... SET pg_pathman.enable */ + if (xact_is_set_stmt(query->utilityStmt, PATHMAN_ENABLE)) + { + /* Accept all events in case it's "enable = OFF" */ + if (IsPathmanReady()) + finish_delayed_invalidation(); - /* - * We should also disable pg_pathman on: - * ALTER EXTENSION pg_pathman - */ - if (query->commandType == CMD_UTILITY && - xact_is_alter_pathman_stmt(query->utilityStmt)) - { - /* Disable pg_pathman to perform a painless update */ - (void) set_config_option(PATHMAN_ENABLE, "off", - PGC_SUSET, PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0, false); + return; + } - return; + /* ... SET [TRANSACTION] */ + if (xact_is_set_stmt(query->utilityStmt, NULL)) + return; + + /* ... ALTER EXTENSION pg_pathman */ + if (xact_is_alter_pathman_stmt(query->utilityStmt)) + { + /* Leave no delayed events before ALTER EXTENSION */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + + /* Disable pg_pathman to perform a painless update */ + (void) set_config_option(PATHMAN_ENABLE, "off", + PGC_SUSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0, false); + + return; + } } + /* Finish all delayed invalidation jobs */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + /* Load config if pg_pathman exists & it's still necessary */ if (IsPathmanEnabled() && !IsPathmanInitialized() && @@ -746,7 +758,7 @@ pathman_relcache_hook(Datum arg, Oid relid) { Oid parent_relid; - /* Hooks can be disabled */ + /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; diff --git a/src/include/xact_handling.h b/src/include/xact_handling.h index 27939304..a762f197 100644 --- a/src/include/xact_handling.h +++ b/src/include/xact_handling.h @@ -28,7 +28,7 @@ LockAcquireResult xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait); bool xact_bgw_conflicting_lock_exists(Oid relid); bool xact_is_level_read_committed(void); bool xact_is_transaction_stmt(Node *stmt); -bool xact_is_set_stmt(Node *stmt); +bool xact_is_set_stmt(Node *stmt, const char *name); bool xact_is_alter_pathman_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); diff --git a/src/xact_handling.c b/src/xact_handling.c index 0d4ea5b0..a65bf3af 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -96,7 +96,7 @@ xact_is_level_read_committed(void) } /* - * Check if 'stmt' is BEGIN\ROLLBACK etc transaction statement. + * Check if 'stmt' is BEGIN/ROLLBACK/etc [TRANSACTION] statement. */ bool xact_is_transaction_stmt(Node *stmt) @@ -111,10 +111,10 @@ xact_is_transaction_stmt(Node *stmt) } /* - * Check if 'stmt' is SET [TRANSACTION] statement. + * Check if 'stmt' is SET ('name' | [TRANSACTION]) statement. 
*/ bool -xact_is_set_stmt(Node *stmt) +xact_is_set_stmt(Node *stmt, const char *name) { /* Check that SET TRANSACTION is implemented via VariableSetStmt */ Assert(VAR_SET_MULTI > 0); @@ -122,7 +122,10 @@ xact_is_set_stmt(Node *stmt) if (!stmt) return false; - if (IsA(stmt, VariableSetStmt)) + if (!IsA(stmt, VariableSetStmt)) + return false; + + if (!name || pg_strcasecmp(name, ((VariableSetStmt *) stmt)->name) == 0) return true; return false; @@ -137,16 +140,17 @@ xact_is_alter_pathman_stmt(Node *stmt) if (!stmt) return false; - if (IsA(stmt, AlterExtensionStmt) && - 0 == strcmp(((AlterExtensionStmt *) stmt)->extname, - "pg_pathman")) + if (!IsA(stmt, AlterExtensionStmt)) + return false; + + if (pg_strcasecmp(((AlterExtensionStmt *) stmt)->extname, "pg_pathman") == 0) return true; return false; } /* - * Check if object is visible in newer transactions. + * Check if object is visible to newer transactions. */ bool xact_object_is_visible(TransactionId obj_xmin) From 5c913a703c37b02493ebb6efdf312bf1271d59f9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Aug 2017 15:30:08 +0300 Subject: [PATCH 129/528] add a comment regarding '#undef DELETE' --- src/planner_tree_modification.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 68701b3d..1163197b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -24,6 +24,10 @@ #include "utils/syscache.h" +/* + * Drop conflicting macros for the sake of TRANSFORM_CONTEXT_FIELD(...). + * For instance, Windows.h contains a nasty "#define DELETE". + */ #ifdef SELECT #undef SELECT #endif From fc365c5f0b16c1883f848b3167ea041cea3f338b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 31 Aug 2017 13:55:31 +0300 Subject: [PATCH 130/528] bump lib version to 1.4.4 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 72e75c25..b3f0bf35 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.3", + "version": "1.4.4", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.3", + "version": "1.4.4", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 29573fba..4adcbeb4 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10403 + 10404 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index c34eda56..546206aa 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010403 +#define CURRENT_LIB_VERSION 0x010404 void *pathman_cache_search_relid(HTAB *cache_table, From 594f617cb4af9725e882ae1276346ae9b42754e7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 1 Sep 2017 12:25:55 +0300 Subject: [PATCH 131/528] fix crash on RESET ALL (issue #121) --- expected/pathman_calamity.out | 19 
+++++++++++++++++++ sql/pathman_calamity.sql | 21 +++++++++++++++++++++ src/xact_handling.c | 9 ++++++++- 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 4adcbeb4..6c1a1729 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -855,6 +855,25 @@ NOTICE: drop cascades to 2 other objects DROP SCHEMA calamity CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; /* * ------------------------------------- * Special tests (pathman_cache_stats) diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..ed1b7b82 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -369,6 +369,27 @@ DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ + +CREATE EXTENSION pg_pathman; + +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; +SET pg_pathman.enable = false; +RESET pg_pathman.enable; +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; + +DROP EXTENSION pg_pathman; + + + /* * ------------------------------------- * Special tests (pathman_cache_stats) diff --git a/src/xact_handling.c b/src/xact_handling.c index a65bf3af..c6696cce 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -125,8 +125,15 @@ xact_is_set_stmt(Node *stmt, const char *name) if (!IsA(stmt, VariableSetStmt)) return false; - if (!name || pg_strcasecmp(name, ((VariableSetStmt *) stmt)->name) == 0) + if (!name) return true; + else + { + char *set_name = ((VariableSetStmt *) stmt)->name; + + if (set_name && pg_strcasecmp(name, set_name) == 0) + return true; + } return false; } From a5e742233591da60078728b9ac9ddd3e920cb0eb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 1 Sep 2017 12:50:47 +0300 Subject: [PATCH 132/528] bump lib version to 1.4.5 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index b3f0bf35..b05c65a4 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.4", + "version": "1.4.5", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.4", + "version": "1.4.5", "abstract": "Partitioning 
tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 6c1a1729..66925628 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10404 + 10405 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 546206aa..e43747e1 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010404 +#define CURRENT_LIB_VERSION 0x010405 void *pathman_cache_search_relid(HTAB *cache_table, From 7ca0dda771238fdd1ded9c2a7eea539c8c90e0d3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 2 Sep 2017 15:48:42 +0300 Subject: [PATCH 133/528] improve PGXN config --- META.json | 12 +++++++++--- README.md | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/META.json b/META.json index b05c65a4..ee2d0f5f 100644 --- a/META.json +++ b/META.json @@ -1,7 +1,7 @@ { "name": "pg_pathman", "abstract": "Partitioning tool", - "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", + "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", "version": "1.4.5", "maintainer": [ "Ildar Musin ", @@ -25,7 +25,7 @@ "file": "pg_pathman--1.4.sql", "docfile": "README.md", "version": "1.4.5", - "abstract": "Partitioning tool" + "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, "meta-spec": { @@ -35,6 +35,12 @@ "tags": [ "partitioning", "partition", - "optimization" + "optimization", + "table", + "tables", + "custom node", + "runtime append", + "background worker", + "fdw" ] } diff --git a/README.md b/README.md index 2935ff3c..19dc98aa 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions. 
The extension is compatible with: + * PostgreSQL 9.5, 9.6, 10; * Postgres Pro Standard 9.5, 9.6; * Postgres Pro Enterprise; From 88fcacb7b4b9732588f350e177c87260b580a17b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 12 Sep 2017 15:03:05 +0300 Subject: [PATCH 134/528] restore compatibility with PostgreSQL 10, refactoring of prepare_expr_state() --- pg_compat_available.sh | 6 ++ src/include/compat/pg_compat.h | 44 ++++++++-- src/include/relation_info.h | 7 +- src/partition_filter.c | 142 +++++++++++++++++++-------------- src/pg_pathman.c | 19 ++--- src/relation_info.c | 49 +++++++++--- 6 files changed, 176 insertions(+), 91 deletions(-) create mode 100755 pg_compat_available.sh diff --git a/pg_compat_available.sh b/pg_compat_available.sh new file mode 100755 index 00000000..d2d7cabc --- /dev/null +++ b/pg_compat_available.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash + +dir=$(dirname $0) +func="$1" + +grep -n -r --include=pg_compat.c --include=pg_compat.h $func $dir | head -n1 diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 72b23dc8..09844beb 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -158,6 +158,18 @@ #endif +/* + * CheckValidResultRel() + */ +#if PG_VERSION_NUM >= 100000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd)) +#elif PG_VERSION_NUM >= 90500 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) +#endif + + /* * create_append_path() */ @@ -266,7 +278,7 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* - * ExecBuildProjectionInfo + * ExecBuildProjectionInfo() */ #if PG_VERSION_NUM >= 100000 #define ExecBuildProjectionInfoCompat(targetList, econtext, resultSlot, \ @@ -366,7 +378,7 @@ char get_rel_persistence(Oid relid); /* - * initial_cost_nestloop + * initial_cost_nestloop() */ #if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) #define initial_cost_nestloop_compat(root, workspace, jointype, outer_path, \ @@ -382,7 +394,7 @@ char get_rel_persistence(Oid relid); /* - * InitResultRelInfo + * InitResultRelInfo() * * for v10 set NULL into 'partition_root' argument to specify that result * relation is not vanilla partition @@ -461,7 +473,7 @@ extern int oid_cmp(const void *p1, const void *p2); /* - * pg_analyze_and_rewrite + * pg_analyze_and_rewrite() * * for v10 cast first arg to RawStmt type */ @@ -479,7 +491,7 @@ extern int oid_cmp(const void *p1, const void *p2); /* - * ProcessUtility + * ProcessUtility() * * for v10 set NULL into 'queryEnv' argument */ @@ -577,6 +589,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, ExecARInsertTriggers((estate), (relinfo), (trigtuple), (recheck_indexes)) #endif + /* * ExecARDeleteTriggers() */ @@ -591,6 +604,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, ExecARDeleteTriggers((estate), (relinfo), (tupleid), (fdw_trigtuple)) #endif + /* * ExecASInsertTriggers() */ @@ -603,6 +617,26 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif +/* + * map_variable_attnos() + */ +#if PG_VERSION_NUM >= 100000 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (to_rowtype), (found_wholerow)) +#elif PG_VERSION_NUM >= 90500 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + 
map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (found_wholerow)) +#endif + + /* * ------------- diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 9921a029..ea6c9abe 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -271,6 +271,10 @@ PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) return expr; } +AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length); + const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, @@ -393,8 +397,5 @@ extern bool pg_pathman_enable_bounds_cache; void init_relation_info_static_data(void); -AttrNumber *build_attributes_map(const PartRelationInfo *prel, - TupleDesc child_tupdesc); - #endif /* RELATION_INFO_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index d4873cbe..35475365 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -69,7 +69,9 @@ CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; static ExprState *prepare_expr_state(const PartRelationInfo *prel, - EState *estate); + Relation source_rel, + EState *estate, + bool try_map); static void prepare_rri_for_insert(EState *estate, ResultRelInfoHolder *rri_holder, const ResultPartsStorage *rps_storage, @@ -261,7 +263,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Open child relation and check if it is a valid target */ child_rel = heap_open(partid, NoLock); - CheckValidResultRel(child_rel, parts_storage->command_type); /* Build Var translation list for 'inserted_cols' */ make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); @@ -311,6 +312,10 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; + /* Check that this partition is a valid result relation */ + CheckValidResultRelCompat(child_result_rel_info, + parts_storage->command_type); + /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; @@ -446,11 +451,11 @@ select_partition_for_insert(ExprState *expr_state, elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - partition_relid = create_partitions_for_value(parent_relid, - value, prel->ev_type); + partition_relid = create_partitions_for_value(parent_relid, + value, prel->ev_type); - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(parent_relid, NULL); } else partition_relid = parts[0]; @@ -475,23 +480,32 @@ select_partition_for_insert(ExprState *expr_state, /* This partition might have sub-partitions */ else if (rri_holder->has_children) { - const PartRelationInfo *sub_prel; + const PartRelationInfo *child_prel; /* Fetch PartRelationInfo for this partitioned relation */ - sub_prel = get_pathman_relation_info(rri_holder->partid); + child_prel = get_pathman_relation_info(rri_holder->partid); /* Might be a false alarm */ - if (!sub_prel) + if (!child_prel) return rri_holder; - /* Build an expression state if not yet */ + /* Build an expression state if it's not ready yet */ if (!rri_holder->expr_state) - rri_holder->expr_state = prepare_expr_state(sub_prel, estate); + { + /* Fetch original topmost parent */ + Relation source_rel = 
parts_storage->base_rri->ri_RelationDesc; + + /* Build a partitioning expression state */ + rri_holder->expr_state = prepare_expr_state(child_prel, + source_rel, + estate, + true); + } /* Recursively search for subpartitions */ rri_holder = select_partition_for_insert(rri_holder->expr_state, econtext, estate, - sub_prel, parts_storage); + child_prel, parts_storage); } } /* Loop until we get some result */ @@ -501,15 +515,45 @@ select_partition_for_insert(ExprState *expr_state, } static ExprState * -prepare_expr_state(const PartRelationInfo *prel, EState *estate) +prepare_expr_state(const PartRelationInfo *prel, + Relation source_rel, + EState *estate, + bool try_map) { ExprState *expr_state; MemoryContext old_mcxt; Node *expr; - /* Change varno in Vars according to range table */ + /* Fetch partitioning expression (we don't care about varno) */ expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); + /* Should we try using map? */ + if (try_map) + { + + AttrNumber *map; + int map_length; + TupleDesc source_tupdesc = RelationGetDescr(source_rel); + + /* Remap expression attributes for source relation */ + map = PrelExpressionAttributesMap(prel, source_tupdesc, &map_length); + + if (map) + { + bool found_whole_row; + + expr = map_variable_attnos_compat(expr, PART_EXPR_VARNO, 0, map, + map_length, InvalidOid, + &found_whole_row); + + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference" + " found in partition key"); + + pfree(map); + } + } + /* Prepare state for expression execution */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); expr_state = ExecInitExpr((Expr *) expr, NULL); @@ -595,8 +639,6 @@ partition_filter_create_scan_state(CustomScan *node) Assert(state->on_conflict_action >= ONCONFLICT_NONE || state->on_conflict_action <= ONCONFLICT_UPDATE); - state->expr_state = NULL; - /* There should be exactly one subplan */ Assert(list_length(node->custom_plans) == 1); @@ -607,55 +649,35 @@ void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; - const PartRelationInfo *prel; PlanState *child_state; + ResultRelInfo *current_rri; + Relation current_rel; + const PartRelationInfo *prel; + bool try_map; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); node->custom_ps = list_make1(child_state); - if (state->expr_state == NULL) - { - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); + /* Fetch current result relation (rri + rel) */ + current_rri = estate->es_result_relation_info; + current_rel = current_rri->ri_RelationDesc; - /* Prepare state for expression execution */ - if (state->command_type == CMD_UPDATE) - { - /* - * In UPDATE queries we would work with child relation, but - * expression contains varattnos of base relation, so we map - * parent varattnos to child varattnos. 
- */ - bool found_whole_row; - - AttrNumber *map; - Node *expr; - ResultRelInfo *child_rri = estate->es_result_relation_info; - Relation child_rel = child_rri->ri_RelationDesc; - - MemoryContext old_mcxt; - - map = build_attributes_map(prel, RelationGetDescr(child_rel)); - expr = map_variable_attnos(PrelExpressionForRelid(prel, PART_EXPR_VARNO), - PART_EXPR_VARNO, 0, map, - RelationGetDescr(child_rel)->natts, - &found_whole_row); + /* Fetch PartRelationInfo for this partitioned relation */ + prel = get_pathman_relation_info(state->partitioned_table); - if (found_whole_row) - elog(ERROR, "unexpected whole-row reference found in partition key"); + /* + * In UPDATE queries we have to work with child relation tlist, + * but expression contains varattnos of base relation, so we + * map parent varattnos to child varattnos. + * + * We don't need map if current relation == base relation. + */ + try_map = state->command_type == CMD_UPDATE && + RelationGetRelid(current_rel) != state->partitioned_table; - /* Prepare state for expression execution */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - state->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(old_mcxt); - } - else - { - /* Simple INSERT, expression based on parent attribute numbers */ - state->expr_state = prepare_expr_state(prel, estate); - } - } + /* Build a partitioning expression state */ + state->expr_state = prepare_expr_state(prel, current_rel, estate, try_map); /* Init ResultRelInfo cache */ init_result_parts_storage(&state->result_parts, estate, @@ -665,6 +687,10 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) (void *) state, state->command_type); + /* Don't forget to initialize 'base_rri'! */ + state->result_parts.base_rri = current_rri; + + /* No warnings yet */ state->warning_triggered = false; } @@ -681,10 +707,6 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); state->subplan_slot = slot; - /* Don't forget to initialize 'base_rri'! */ - if (!state->result_parts.base_rri) - state->result_parts.base_rri = estate->es_result_relation_info; - if (state->tup_convert_slot) ExecClearTuple(state->tup_convert_slot); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d6bfde96..0bd0919c 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -420,11 +420,7 @@ append_child_relation(PlannerInfo *root, child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; child_rte->requiredPerms = 0; /* perform all checks on parent */ - - /* Does this child have subpartitions? */ - child_rte->inh = (child_oid == parent_rte->relid) ? 
- false : /* it's a parent, skip */ - child_relation->rd_rel->relhassubclass; + child_rte->inh = false; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); @@ -574,20 +570,19 @@ append_child_relation(PlannerInfo *root, add_child_rel_equivalences(root, appinfo, parent_rel, child_rel); child_rel->has_eclass_joins = parent_rel->has_eclass_joins; - /* Close child relations, but keep locks */ - heap_close(child_relation, NoLock); - - /* Recursively expand child partition if it has subpartitions */ - if (child_rte->inh) + /* Expand child partition if it might have subpartitions */ + if (parent_rte->relid != child_oid && + child_relation->rd_rel->relhassubclass) { - child_rte->inh = false; - pathman_rel_pathlist_hook(root, child_rel, child_rti, child_rte); } + /* Close child relations, but keep locks */ + heap_close(child_relation, NoLock); + return child_rti; } diff --git a/src/relation_info.c b/src/relation_info.c index eb8b0980..e6a40a36 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1442,27 +1442,43 @@ shout_if_prel_is_invalid(const Oid parent_oid, } /* - * Get attributes map between parent and child relation. - * This is simplified version of functions that return TupleConversionMap. - * And it should be faster if expression uses not all fields from relation. + * Remap partitioning expression columns for tuple source relation. + * This is a simplified version of functions that return TupleConversionMap. + * It should be faster if expression uses a few fields of relation. */ AttrNumber * -build_attributes_map(const PartRelationInfo *prel, TupleDesc child_tupdesc) +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length) { - AttrNumber i = -1; Oid parent_relid = PrelParentRelid(prel); - int natts = child_tupdesc->natts; - AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); + int source_natts = source_tupdesc->natts, + expr_natts = 0; + AttrNumber *result, + i; + bool is_trivial = true; + + /* Get largest attribute number used in expression */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + expr_natts = i; + + /* Allocate array for map */ + result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); + /* Find a match for each attribute */ + i = -1; while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { - int j; AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; char *attname = get_attname(parent_relid, attnum); + int j; + + Assert(attnum <= expr_natts); - for (j = 0; j < natts; j++) + for (j = 0; j < source_natts; j++) { - Form_pg_attribute att = child_tupdesc->attrs[j]; + Form_pg_attribute att = source_tupdesc->attrs[j]; if (att->attisdropped) continue; /* attrMap[attnum - 1] is already 0 */ @@ -1475,8 +1491,19 @@ build_attributes_map(const PartRelationInfo *prel, TupleDesc child_tupdesc) } if (result[attnum - 1] == 0) - elog(ERROR, "Couldn't find '%s' column in child relation", attname); + elog(ERROR, "cannot find column \"%s\" in child relation", attname); + + if (result[attnum - 1] != attnum) + is_trivial = false; + } + + /* Check if map is trivial */ + if (is_trivial) + { + pfree(result); + return NULL; } + *map_length = expr_natts; return result; } From eecab831eeb5cdc7664870e33db1964e550b3008 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 11:57:13 +0300 Subject: [PATCH 135/528] fixup! 
restore compatibility with PostgreSQL 10, refactoring of prepare_expr_state() --- src/partition_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_router.c b/src/partition_router.c index b719bf40..f4a8cb6c 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -238,7 +238,7 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e /* * ---------------------------------------------------------------- * ExecDeleteInternal - * Basicly copy of ExecDelete from executor/nodeModifyTable.c + * Basically is a copy of ExecDelete from executor/nodeModifyTable.c * ---------------------------------------------------------------- */ From e263f6a661899875657ee81dfe9a90d6d95e5fac Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 14:21:15 +0300 Subject: [PATCH 136/528] Introduce TupleDescAttr macro for future compatibility with pg11 --- src/compat/pg_compat.c | 9 +++++---- src/init.c | 10 ++++------ src/partition_creation.c | 5 ++--- src/partition_filter.c | 2 +- src/pg_pathman.c | 6 +++--- src/relation_info.c | 2 +- src/utility_stmt_hooking.c | 11 ++++++----- 7 files changed, 22 insertions(+), 23 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 71f93a1e..ff2aa15f 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -520,6 +520,9 @@ get_rel_persistence(Oid relid) } #endif +#if PG_VERSION_NUM < 110000 +#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) +#endif #if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) /* @@ -542,7 +545,7 @@ convert_tuples_by_name_map(TupleDesc indesc, attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber)); for (i = 0; i < n; i++) { - Form_pg_attribute att = outdesc->attrs[i]; + Form_pg_attribute att = TupleDescAttr(outdesc, i); char *attname; Oid atttypid; int32 atttypmod; @@ -555,7 +558,7 @@ convert_tuples_by_name_map(TupleDesc indesc, atttypmod = att->atttypmod; for (j = 0; j < indesc->natts; j++) { - att = indesc->attrs[j]; + att = TupleDescAttr(indesc, j); if (att->attisdropped) continue; if (strcmp(attname, NameStr(att->attname)) == 0) @@ -587,8 +590,6 @@ convert_tuples_by_name_map(TupleDesc indesc, } #endif - - /* * ------------- * Common code diff --git a/src/init.c b/src/init.c index 7b0cdda0..13487f7e 100644 --- a/src/init.c +++ b/src/init.c @@ -631,9 +631,8 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, rel = heap_open(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is of regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); + Assert(TupleDescAttr(RelationGetDescr(rel), + Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); /* Check that number of columns == Natts_pathman_config */ Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); @@ -880,9 +879,8 @@ read_pathman_config(void (*per_row_cb)(Datum *values, rel = heap_open(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is of regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); + Assert(TupleDescAttr(RelationGetDescr(rel), + Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); /* Check that number of columns == Natts_pathman_config */ Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); diff --git a/src/partition_creation.c b/src/partition_creation.c index 
3b98761a..51532089 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -920,8 +920,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { Form_pg_attribute acl_column; - acl_column = pg_class_desc->attrs[Anum_pg_class_relacl - 1]; - + acl_column = TupleDescAttr(pg_class_desc, Anum_pg_class_relacl - 1); acl_datum = datumCopy(acl_datum, acl_column->attbyval, acl_column->attlen); } @@ -997,7 +996,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { Form_pg_attribute acl_column; - acl_column = pg_attribute_desc->attrs[Anum_pg_attribute_attacl - 1]; + acl_column = TupleDescAttr(pg_attribute_desc, Anum_pg_attribute_attacl - 1); acl_datum = datumCopy(acl_datum, acl_column->attbyval, diff --git a/src/partition_filter.c b/src/partition_filter.c index 35475365..b70a296f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1022,7 +1022,7 @@ prepare_rri_fdw_for_insert(EState *estate, TargetEntry *te; Param *param; - attr = tupdesc->attrs[i]; + attr = TupleDescAttr(tupdesc, i); if (attr->attisdropped) continue; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0bd0919c..25308479 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1787,7 +1787,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, Oid attcollation; int new_attno; - att = old_tupdesc->attrs[old_attno]; + att = TupleDescAttr(old_tupdesc, old_attno); if (att->attisdropped) { /* Just put NULL into this list entry */ @@ -1825,7 +1825,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, * notational device to include the assignment into the if-clause. */ if (old_attno < newnatts && - (att = new_tupdesc->attrs[old_attno]) != NULL && + (att = TupleDescAttr(new_tupdesc, old_attno)) != NULL && !att->attisdropped && att->attinhcount != 0 && strcmp(attname, NameStr(att->attname)) == 0) new_attno = old_attno; @@ -1833,7 +1833,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, { for (new_attno = 0; new_attno < newnatts; new_attno++) { - att = new_tupdesc->attrs[new_attno]; + att = TupleDescAttr(new_tupdesc, new_attno); /* * Make clang analyzer happy: diff --git a/src/relation_info.c b/src/relation_info.c index e6a40a36..2e0ce598 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1478,7 +1478,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, for (j = 0; j < source_natts; j++) { - Form_pg_attribute att = source_tupdesc->attrs[j]; + Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); if (att->attisdropped) continue; /* attrMap[attnum - 1] is already 0 */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 05183a0b..d8c956af 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -262,13 +262,12 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) if (attnamelist == NIL) { /* Generate default column list */ - Form_pg_attribute *attr = tupDesc->attrs; int attr_count = tupDesc->natts; int i; for (i = 0; i < attr_count; i++) { - if (attr[i]->attisdropped) + if (TupleDescAttr(tupDesc, i)->attisdropped) continue; attnums = lappend_int(attnums, i + 1); } @@ -288,11 +287,13 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) attnum = InvalidAttrNumber; for (i = 0; i < tupDesc->natts; i++) { - if (tupDesc->attrs[i]->attisdropped) + Form_pg_attribute att = TupleDescAttr(tupDesc, i); + + if (att->attisdropped) continue; - if (namestrcmp(&(tupDesc->attrs[i]->attname), name) == 0) + if 
(namestrcmp(&(att->attname), name) == 0) { - attnum = tupDesc->attrs[i]->attnum; + attnum = att->attnum; break; } } From a874ae7492dc2a6e7301cefad68b152e91396f62 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 16:25:45 +0300 Subject: [PATCH 137/528] Fix INSERTs for subpartitions --- expected/pathman_subpartitions.out | 46 ++++++++++++++++++++++++++---- sql/pathman_subpartitions.sql | 2 +- src/compat/pg_compat.c | 4 --- src/include/compat/pg_compat.h | 4 +++ src/partition_filter.c | 4 ++- 5 files changed, 49 insertions(+), 11 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index ab93090d..1965b7a1 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -216,33 +216,69 @@ SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); 2 (1 row) -INSERT INTO subpartitions.abc VALUES (25, 25); +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ tableoid | a | b -----------------------+----+---- subpartitions.abc_1_1 | 25 | 25 -(1 row) + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_1 | 125 | 25 -(1 row) + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_2 | 125 | 75 -(1 row) + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ tableoid | a | b -----------------------+-----+----- subpartitions.abc_2_3 | 125 | 125 -(1 row) + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 10 other objects diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 1e5b2e47..3d48f26a 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -69,7 +69,7 @@ SELECT create_range_partitions('subpartitions.abc', 
'a', 0, 100, 2); SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); -INSERT INTO subpartitions.abc VALUES (25, 25); +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index ff2aa15f..809dc79f 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -520,10 +520,6 @@ get_rel_persistence(Oid relid) } #endif -#if PG_VERSION_NUM < 110000 -#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) -#endif - #if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) /* diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 09844beb..f63f1bf9 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -22,6 +22,7 @@ #include "compat/debug_compat_features.h" #include "postgres.h" +#include "access/tupdesc.h" #include "commands/trigger.h" #include "executor/executor.h" #include "nodes/memnodes.h" @@ -636,6 +637,9 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, (found_wholerow)) #endif +#ifndef TupleDescAttr +#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) +#endif /* diff --git a/src/partition_filter.c b/src/partition_filter.c index b70a296f..78123c71 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -524,6 +524,9 @@ prepare_expr_state(const PartRelationInfo *prel, MemoryContext old_mcxt; Node *expr; + /* Make sure we use query memory context */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + /* Fetch partitioning expression (we don't care about varno) */ expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); @@ -555,7 +558,6 @@ prepare_expr_state(const PartRelationInfo *prel, } /* Prepare state for expression execution */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); expr_state = ExecInitExpr((Expr *) expr, NULL); MemoryContextSwitchTo(old_mcxt); From c6ebde7909ae7c37462ff9a43f1ff0505ec5ce70 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 18:25:27 +0300 Subject: [PATCH 138/528] Fix compatibility with pg11 --- src/hooks.c | 8 ++--- src/include/compat/pg_compat.h | 53 +++++++++++++++++++++++++++++++++- src/pg_pathman.c | 4 +-- 3 files changed, 58 insertions(+), 7 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 7f77514a..94a46399 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -199,7 +199,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, return; /* could not build it, retreat! */ - required_nestloop = calc_nestloop_required_outer(outer, inner); + required_nestloop = calc_nestloop_required_outer_compat(outer, inner); /* * Check to see if proposed path is still parameterized, and reject if the @@ -230,9 +230,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, nest_path = create_nestloop_path_compat(root, joinrel, jointype, - &workspace, extra, outer, inner, - filtered_joinclauses, pathkeys, - calc_nestloop_required_outer(outer, inner)); + &workspace, extra, outer, inner, + filtered_joinclauses, pathkeys, + calc_nestloop_required_outer_compat(outer, inner)); /* * NOTE: Override 'rows' value produced by standard estimator. 
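[Editor's note: the hooks.c hunk above moves pathman_join_pathlist_hook() onto a compat wrapper because the signature of calc_nestloop_required_outer() changed in PostgreSQL 11; as the pg_compat.h hunk below shows, it now takes the outer/inner relids plus their parameterization relids instead of the two Path pointers. For context, a sketch of a query shape that exercises this join-path code; the tables are hypothetical, pg_pathman is assumed to be set up, and whether the planner actually keeps the parameterized NestLoop over a RuntimeAppend depends on costing:

CREATE TABLE dict(id INT NOT NULL);
CREATE TABLE fact(id INT NOT NULL, payload TEXT);
SELECT create_hash_partitions('fact', 'id', 4);
SET pg_pathman.enable_runtimeappend = on;
EXPLAIN (COSTS OFF) SELECT * FROM dict JOIN fact USING (id);
]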
diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f63f1bf9..59a12f74 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -30,6 +30,7 @@ #include "nodes/pg_list.h" #include "optimizer/cost.h" #include "optimizer/paths.h" +#include "optimizer/pathnode.h" #include "utils/memutils.h" /* @@ -38,11 +39,61 @@ * ---------- */ +/* + * calc_nestloop_required_outer() + */ + +#if PG_VERSION_NUM >= 110000 +static inline Relids +calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) +{ + RelOptInfo *innerrel = inner_path->parent; + RelOptInfo *outerrel = outer_path->parent; + Relids innerrelids = innerrel->relids; + Relids outerrelids = outerrel->relids; + Relids inner_paramrels = PATH_REQ_OUTER(inner_path); + Relids outer_paramrels = PATH_REQ_OUTER(outer_path); + + return calc_nestloop_required_outer(outerrelids, outer_paramrels, + innerrelids, inner_paramrels); +} +#else +#define calc_nestloop_required_outer_compat(outer_path, inner_path) \ + (calc_nestloop_required_outer((outer_path), (inner_path))) +#endif /* * adjust_appendrel_attrs() */ -#if PG_VERSION_NUM >= 90600 + +#if PG_VERSION_NUM >= 110000 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + node, \ + 1, &(appinfo)) +#elif PG_VERSION_NUM >= 90600 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + node, \ + (appinfo)) +#elif PG_VERSION_NUM >= 90500 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + node, \ + (appinfo)) +#endif + + +#if PG_VERSION_NUM >= 110000 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + 1, \ + &(appinfo)); \ + } while (0) +#elif PG_VERSION_NUM >= 90600 #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltarget->exprs = (List *) \ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 25308479..3079408f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -495,7 +495,7 @@ append_child_relation(PlannerInfo *root, AssertState(parent_rel); /* Adjust join quals for this child */ - child_rel->joininfo = (List *) adjust_appendrel_attrs(root, + child_rel->joininfo = (List *) adjust_appendrel_attrs_compat(root, (Node *) parent_rel->joininfo, appinfo); @@ -532,7 +532,7 @@ append_child_relation(PlannerInfo *root, else childquals = get_all_actual_clauses(parent_rel->baserestrictinfo); /* Now it's time to change varnos and rebuld quals */ - childquals = (List *) adjust_appendrel_attrs(root, + childquals = (List *) adjust_appendrel_attrs_compat(root, (Node *) childquals, appinfo); childqual = eval_const_expressions(root, (Node *) From 749ae29124e91dc4c188205439172f9587deee16 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 18:42:47 +0300 Subject: [PATCH 139/528] fixup! 
Fix compatibility with pg11 --- src/include/compat/pg_compat.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 59a12f74..7a320213 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -69,17 +69,17 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) #if PG_VERSION_NUM >= 110000 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ - node, \ + (node), \ 1, &(appinfo)) #elif PG_VERSION_NUM >= 90600 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ - node, \ + (node), \ (appinfo)) #elif PG_VERSION_NUM >= 90500 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ - node, \ + (node), \ (appinfo)) #endif From 70f24e8393c2120293af6dfe52d0c890126b9d72 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 14 Sep 2017 11:23:51 +0300 Subject: [PATCH 140/528] Remove unnecessary block of code --- src/include/compat/pg_compat.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 7a320213..ba2e8a72 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -71,11 +71,6 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) adjust_appendrel_attrs((root), \ (node), \ 1, &(appinfo)) -#elif PG_VERSION_NUM >= 90600 -#define adjust_appendrel_attrs_compat(root, node, appinfo) \ - adjust_appendrel_attrs((root), \ - (node), \ - (appinfo)) #elif PG_VERSION_NUM >= 90500 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ (node), \ (appinfo)) #endif From 1154cfafe03148b601ecf41c68dbcb2d2fc2f453 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 14 Sep 2017 12:23:13 +0300 Subject: [PATCH 141/528] Make clang analyzer quiet --- src/pg_pathman.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3079408f..cfe24cf7 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -410,6 +410,11 @@ append_child_relation(PlannerInfo *root, } parent_rel = root->simple_rel_array[parent_rti]; + + /* make clang analyzer quiet */ + if (!parent_rel) + elog(ERROR, "parent relation is NULL"); + parent_rte = root->simple_rte_array[parent_rti]; /* Open child relation (we've just locked it) */ From 8f60be7850578ccc48c7e6102b2ff669f4c0abb9 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 15 Sep 2017 12:42:33 +0300 Subject: [PATCH 142/528] Add support for several levels of RTI bitmapset lists in the planner --- expected/pathman_subpartitions.out | 20 +++++++++++++ sql/pathman_subpartitions.sql | 16 +++++++++++ src/compat/pg_compat.c | 6 ++-- src/compat/relation_tags.c | 46 ++++++++++++++++++++++++++++++ src/hooks.c | 5 +++- src/include/compat/relation_tags.h | 3 +- src/include/relation_info.h | 2 -- src/pg_pathman.c | 2 +- 8 files changed, 93 insertions(+), 7 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 1965b7a1..54e93e9e 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -136,6 +136,26 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; Filter: (a >= 210) (4 rows) +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN 
+ RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); /* Multilevel partitioning with updates */ CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( rel REGCLASS, diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 3d48f26a..aefe728d 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -28,6 +28,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); +DROP FUNCTION check_multilevel_queries(); + /* Multilevel partitioning with updates */ CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( rel REGCLASS, diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 809dc79f..602102c4 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -618,8 +618,7 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) /* * Accumulate size information from each live child. */ - Assert(childrel->rows > 0); - + Assert(childrel->rows >= 0); parent_rows += childrel->rows; #if PG_VERSION_NUM >= 90600 @@ -632,6 +631,9 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) /* Set 'rows' for append relation */ rel->rows = parent_rows; + if (parent_rows == 0) + parent_rows = 1; + #if PG_VERSION_NUM >= 90600 rel->reltarget->width = rint(parent_size / parent_rows); #else diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c index 383dd1f5..288f60ff 100644 --- a/src/compat/relation_tags.c +++ b/src/compat/relation_tags.c @@ -13,6 +13,7 @@ #include "planner_tree_modification.h" #include "nodes/nodes.h" +#include "nodes/pg_list.h" #ifndef NATIVE_RELATION_TAGS @@ -23,6 +24,15 @@ */ static HTAB *per_table_relation_tags = NULL; +/* + * List of bitmapsets, where we keep partitioned RangeTblEntry indexes + * for each level of planner + */ +List *partitioned_rti_sets = NIL; + +/* Points to last bitmapset in list */ +Bitmapset *current_partitioned_rti = NULL; + /* * Single row of 'per_table_relation_tags'. * NOTE: do not reorder these fields. 
@@ -219,6 +229,11 @@ incr_refcount_relation_tags(void) if (++per_table_relation_tags_refcount <= 0) elog(WARNING, "imbalanced %s", CppAsString(incr_refcount_relation_tags)); + + if (per_table_relation_tags_refcount == 1) + partitioned_rti_sets = NIL; + + current_partitioned_rti = NULL; } /* Return current value of usage counter */ @@ -233,11 +248,26 @@ get_refcount_relation_tags(void) void decr_refcount_relation_tags(void) { + int len; + /* Decrement reference counter */ if (--per_table_relation_tags_refcount < 0) elog(WARNING, "imbalanced %s", CppAsString(decr_refcount_relation_tags)); + /* Partitioned RTEs list management */ + len = list_length(partitioned_rti_sets); + if (len && current_partitioned_rti) + { + bms_free(current_partitioned_rti); + + partitioned_rti_sets = list_truncate(partitioned_rti_sets, len - 1); + if (partitioned_rti_sets) + current_partitioned_rti = llast(partitioned_rti_sets); + else + current_partitioned_rti = NULL; + } + /* Free resources if no one is using them */ if (per_table_relation_tags_refcount == 0) { @@ -249,3 +279,19 @@ decr_refcount_relation_tags(void) #endif } } + +void +MarkPartitionedRTE(Index rti) +{ + bool add = (current_partitioned_rti == NULL); + + current_partitioned_rti = bms_add_member(current_partitioned_rti, rti); + if (add) + partitioned_rti_sets = lappend(partitioned_rti_sets, current_partitioned_rti); +} + +bool +IsPartitionedRTE(Index rti) +{ + return bms_is_member(rti, current_partitioned_rti); +} diff --git a/src/hooks.c b/src/hooks.c index 8d8fc717..fde6ea84 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,11 +36,11 @@ #include "utils/typcache.h" #include "utils/lsyscache.h" + /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) - static inline bool allow_star_schema_join(PlannerInfo *root, Path *outer_path, @@ -374,6 +374,9 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pathkeyDesc = (PathKey *) linitial(pathkeys); } + /* mark as partitioned table */ + MarkPartitionedRTE(rti); + children = PrelGetChildrenArray(prel); ranges = list_make1_irange_full(prel, IR_COMPLETE); diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h index d5183d32..f391bbd6 100644 --- a/src/include/compat/relation_tags.h +++ b/src/include/compat/relation_tags.h @@ -36,6 +36,8 @@ /* Memory context we're going to use for tags */ #define RELATION_TAG_MCXT TopTransactionContext +extern void MarkPartitionedRTE(Index rti); +extern bool IsPartitionedRTE(Index rti); /* Safe TAG constructor (Integer) */ static inline List * @@ -74,5 +76,4 @@ void incr_refcount_relation_tags(void); uint32 get_refcount_relation_tags(void); void decr_refcount_relation_tags(void); - #endif /* RELATION_TAGS_H */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index ea6c9abe..b5ac6877 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -214,8 +214,6 @@ typedef enum PPS_NOT_SURE /* can't determine (not transactional state) */ } PartParentSearch; - - /* * PartRelationInfo field access macros & functions. */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 2ed81291..704328ba 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1952,7 +1952,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, * table and we've already filled it, skip it. 
Otherwise build a * pathlist for it */ - if (!childRTE->inh || !childrel->pathlist) + if (!IsPartitionedRTE(childRTindex) || childrel->pathlist == NIL) { /* Compute child's access paths & sizes */ if (childRTE->relkind == RELKIND_FOREIGN_TABLE) From 5e65cef18bf523d89f3f987f70fc00c181cd1845 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 15 Sep 2017 16:34:56 +0300 Subject: [PATCH 143/528] Refactor tests according to new testgres version --- tests/python/partitioning_test.py | 1866 ++++++++++++++--------------- 1 file changed, 903 insertions(+), 963 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 32c30492..2f772041 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -7,17 +7,20 @@ Copyright (c) 2015-2017, Postgres Professional """ -import unittest +import json import math -import time import os import re import subprocess import threading +import time +import time +import unittest -from testgres import get_new_node, stop_all, get_config +from distutils.version import LooseVersion +from testgres import get_new_node, get_bin_path, get_pg_config -version = get_config().get("VERSION_NUM") +version = LooseVersion(get_pg_config().get("VERSION_NUM")) # Helper function for json equality @@ -42,124 +45,109 @@ def wrapper(*args, **kwargs): return wrapper -class PartitioningTests(unittest.TestCase): - def setUp(self): - self.setup_cmd = [ - "create table abc(id serial, t text)", - "insert into abc select generate_series(1, 300000)", - "select create_hash_partitions('abc', 'id', 3, partition_data := false)", - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): +class Tests(unittest.TestCase): + def start_new_pathman_cluster(self, name='test', + allow_streaming=False, test_data=False): node = get_new_node(name) - node.init(allows_streaming=allows_streaming) + node.init(allow_streaming=allow_streaming) node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") node.start() node.psql('postgres', 'create extension pg_pathman') - return node + if test_data: + cmds = ( + "create table abc(id serial, t text)", + "insert into abc select generate_series(1, 300000)", + "select create_hash_partitions('abc', 'id', 3, partition_data := false)", + ) + for cmd in cmds: + node.safe_psql('postgres', cmd) - def init_test_data(self, node): - """ Initialize pg_pathman extension and test data """ - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) + return node def catchup_replica(self, master, replica): """ Wait until replica synchronizes with master """ - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name + if version >= LooseVersion('10'): + wait_lsn_query = """ + SELECT pg_current_wal_lsn() <= replay_lsn + FROM pg_stat_replication + WHERE application_name = '{0}' + """ else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) + wait_lsn_query = """ + SELECT pg_current_xlog_location() <= replay_location + FROM pg_stat_replication + WHERE application_name = '{0}' + """ + + master.poll_query_until('postgres', + wait_lsn_query.format(replica.name)) def test_concurrent(self): """ Test concurrent partitioning """ - node = self.start_new_pathman_cluster() - 
self.init_test_data(node) - - node.psql('postgres', "select partition_table_concurrently('abc')") + with self.start_new_pathman_cluster(test_data=True) as node: + node.psql('postgres', "select partition_table_concurrently('abc')") - while True: - # update some rows to check for deadlocks - node.safe_psql('postgres', """ - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - """) - - count = node.execute('postgres', """ - select count(*) from pathman_concurrent_part_tasks - """) - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() + while True: + # update some rows to check for deadlocks + node.safe_psql('postgres', """ + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute('postgres', """ + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + node.stop() def test_replication(self): """ Test how pg_pathman works with replication """ - node = get_new_node('master') - replica = get_new_node('repl') - - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], 300000) - - # check that UPDATE in pathman_config_params invalidates cache - node.psql('postgres', 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) + with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: + with node.replicate('node2') as replica: + replica.start() + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + 
self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('postgres', 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) def test_locks(self): """ @@ -167,9 +155,6 @@ def test_locks(self): waits for other sessions if they are doing the same """ - import threading - import time - class Flag: def __init__(self, value): self.flag = value @@ -197,62 +182,63 @@ def add_partition(node, flag, query): flag.set(True) # Initialize master server - node = get_new_node('master') + with get_new_node('master') as node: + node.init() + node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") + node.start() + sql = """ + create extension pg_pathman; + create table abc(id serial, t text); + insert into abc select generate_series(1, 100000); + select create_range_partitions('abc', 'id', 1, 50000); + """ + node.safe_psql('postgres', sql) + + # Start transaction that will create partition + with node.connect() as con: + con.begin() + con.execute('select append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions and wait some + # time + query = ( + "select prepend_range_partition('abc')", + "select append_range_partition('abc')", + "select add_range_partition('abc', 500000, 550000)", + ) + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. 
Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) - node.init() - node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);') - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass'), - b'6\n') + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + "select count(*) from pg_inherits where inhparent='abc'::regclass"), + b'6\n') def test_tablespace(self): """ Check tablespace support """ @@ -265,251 +251,301 @@ def check_tablespace(node, tablename, tablespace): return res[0][0] == tablespace - node = get_new_node('master') - node.init() - node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') + with get_new_node('master') as node: + node.init() + node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.psql('postgres', 'create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) + + # create table in this tablespace + node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended\')') + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql('postgres', + 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + 'postgres', + 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' + ) - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql('postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' - ) - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' - ) - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' - ) - - # yapf: disable - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) @if_fdw_enabled def test_foreign_table(self): """ Test foreign tables """ # Start master server - master = get_new_node('test') - master.init() - master.append_conf('postgresql.conf', """ - shared_preload_libraries='pg_pathman, postgres_fdw'\n - """) - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql('postgres', """ - create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2) - """) - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', "create table ftable(id serial, name text)") - fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") - - # Create foreign table and attach it to partitioned table - master.safe_psql('postgres', """ - create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}') - 
""".format(fserv.port)) - - master.safe_psql('postgres', """ - create user mapping for {0} server fserv - options (user '{0}') - """.format(username)) - - master.safe_psql('postgres', """ - import foreign schema public limit to (ftable) - from server fserv into public - """) - - master.safe_psql( - 'postgres', - "select attach_range_partition('abc', 'ftable', 20, 30)") - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n') - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - b'25|foreign\n26|part\n') - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql('postgres', """ - create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2) - """) - fserv.safe_psql('postgres', - 'create table f_hash_test(id serial, name text)') - - master.safe_psql('postgres', """ - import foreign schema public limit to (f_hash_test) - from server fserv into public - """) - master.safe_psql('postgres', """ - select replace_hash_partition('hash_test_1', 'f_hash_test') - """) - master.safe_psql('postgres', - 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') - master.safe_psql('postgres', "select drop_partitions('hash_test')") + with get_new_node('test') as master, get_new_node('fserv') as fserv: + master.init() + master.append_conf('postgresql.conf', """ + shared_preload_libraries='pg_pathman, postgres_fdw'\n + """) + master.start() + master.psql('postgres', 'create extension pg_pathman') + master.psql('postgres', 'create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + master.psql('postgres', """ + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('postgres', 'select current_user')[0][0] + + fserv.init().start() + fserv.safe_psql('postgres', "create table ftable(id serial, name text)") + fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql('postgres', """ + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql('postgres', """ + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql('postgres', """ + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + 'postgres', + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + 
self.assertEqual( + master.safe_psql('postgres', 'select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql('postgres', """ + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('postgres', + 'create table f_hash_test(id serial, name text)') + + master.safe_psql('postgres', """ + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql('postgres', """ + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('postgres', + 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql('postgres', "select drop_partitions('hash_test')") @if_fdw_enabled def test_parallel_nodes(self): """ Test parallel queries under partitions """ - import json - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() + with get_new_node('test') as node: + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < LooseVersion('9.6.0'): + return + + # Prepare test database + node.psql('postgres', 'create extension pg_pathman') + node.psql('postgres', """ + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return + # create statistics for both partitioned tables + node.psql('postgres', 'vacuum analyze') - # Prepare test database - node.psql('postgres', 'create extension pg_pathman') + node.psql('postgres', """ + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) - node.psql('postgres', """ - create table range_partitioned as - select generate_series(1, 1e4::integer) i; - - alter table range_partitioned alter column i set not null; - select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); - - create table hash_partitioned as - select generate_series(1, 1e4::integer) i; - - alter table hash_partitioned alter column i set not null; 
- select create_hash_partitions('hash_partitioned', 'i', 10); - """) - - # create statistics for both partitioned tables - node.psql('postgres', 'vacuum analyze') - - node.psql('postgres', """ - create or replace function query_plan(query text) - returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= LooseVersion('10'): + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, "Plans": [ { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", + "Node Type": "Gather", "Parent Relationship": "Outer", "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, "Plans": [ { "Node Type": "Append", @@ -537,354 +573,279 @@ def test_parallel_nodes(self): 
} ] } - ] + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" } } ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute( - 'select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute( - 'select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') + """) + self.assertEqual(ordered(plan), ordered(expected)) - # Stop instance and finish work - node.stop() - node.cleanup() + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') def test_conc_part_drop_runtime_append(self): """ Test concurrent partition drop + SELECT (RuntimeAppend) """ # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'drop_test' and partition it - with node.connect() as con0: - # yapf: disable - con0.begin() - con0.execute("create table drop_test(val int not null)") - con0.execute("insert into drop_test select 
generate_series(1, 1000)") - con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - try: - from queue import Queue - except ImportError: - from Queue import Queue - - # return values from thread - queue = Queue() - - # Thread for connection #2 (it has to wait) - def con2_thread(): + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + """) # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + + + # Step 1: cache partitioned table in con1 con1.begin() - con2.execute('set enable_hashjoin = f') - con2.execute('set enable_mergejoin = f') - - res = con2.execute(""" - explain (analyze, costs off, timing off) - select * from drop_test - where val = any (select generate_series(1, 40, 34)) - """) # query selects from drop_test_1 and drop_test_4 + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache con2.commit() - has_runtime_append = False - has_drop_test_1 = False - has_drop_test_4 = False - - for row in res: - if row[0].find('RuntimeAppend') >= 0: - has_runtime_append = True - continue - - if row[0].find('drop_test_1') >= 0: - has_drop_test_1 = True - continue - - if row[0].find('drop_test_4') >= 0: - has_drop_test_4 = True - continue - - # return all values in tuple - queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) - - - # Step 1: cache partitioned table in con1 - con1.begin() - con1.execute('select count(*) from drop_test') # load pathman's cache - con1.commit() - - # Step 2: cache partitioned table in con2 - con2.begin() - con2.execute('select count(*) from drop_test') # load pathman's cache - con2.commit() - - # Step 3: drop first partition of 'drop_test' - con1.begin() - con1.execute('drop table drop_test_1') - - # Step 4: try executing select (RuntimeAppend) - t = threading.Thread(target=con2_thread) - t.start() + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') - # Step 5: wait until 
't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() - if int(locks[0][0]) > 0: - break + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) - # Step 6: commit 'DROP TABLE' - con1.commit() + if int(locks[0][0]) > 0: + break - # Step 7: wait for con2 - t.join() + # Step 6: commit 'DROP TABLE' + con1.commit() - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'drop_test'::regclass - order by range_min, range_max - """) + # Step 7: wait for con2 + t.join() - # check number of partitions - self.assertEqual(len(rows), 99) + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) - # check RuntimeAppend + selected partitions - (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() - self.assertTrue(has_runtime_append) - self.assertFalse(has_drop_test_1) - self.assertTrue(has_drop_test_4) + # check number of partitions + self.assertEqual(len(rows), 99) - # Stop instance and finish work - node.stop() - node.cleanup() + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) def test_conc_part_creation_insert(self): """ Test concurrent partition creation on INSERT """ # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - # yapf: disable - con0.begin() - con0.execute("create table ins_test(val int not null)") - con0.execute("insert into ins_test select generate_series(1, 50)") - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 
51) - self.assertEqual(int(rows[5][5]), 61) + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') - # Stop instance and finish work - node.stop() - node.cleanup() + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) def test_conc_part_merge_insert(self): """ Test concurrent merge_range_partitions() + INSERT """ # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - # yapf: disable - con0.begin() - con0.execute("create table ins_test(val int not null)") - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache - # Thread for connection #2 (it has to wait) - def con2_thread(): + # Step 2: initilize con2 con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's 
cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute( - "select merge_range_partitions('ins_test_1', 'ins_test_2')") + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() - if int(locks[0][0]) > 0: - break + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) - # Step 6: finish merge in con1 (success, unlock) - con1.commit() + if int(locks[0][0]) > 0: + break - # Step 7: wait for con2 - t.join() + # Step 6: finish merge in con1 (success, unlock) + con1.commit() - rows = con1.execute("select *, tableoid::regclass::text from ins_test") + # Step 7: wait for con2 + t.join() - # check number of rows in table - self.assertEqual(len(rows), 1) + rows = con1.execute("select *, tableoid::regclass::text from ins_test") - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) + # check number of rows in table + self.assertEqual(len(rows), 1) - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) - # Stop instance and finish work - node.stop() - node.cleanup() + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') def test_pg_dump(self): """ @@ -904,220 +865,206 @@ def test_pg_dump(self): the rest of data - in child tables. 
""" - import subprocess - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf('postgresql.conf', """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute( - 'insert into range_partitioned select i from generate_series(1, 500) i' - ) - con.execute('create table hash_partitioned (i integer not null)') - con.execute( - 'insert into hash_partitioned select i from generate_series(1, 500) i' - ) - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute( - 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)' - ) - con.execute( - 'select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)' - ) - - # fillin child tables with remain data - con.execute( - 'insert into range_partitioned select i from generate_series(501, 1000) i' - ) - con.execute( - 'insert into hash_partitioned select i from generate_series(501, 1000) i' - ) - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; + with get_new_node('test') as node: + node.init() + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false """) - con.execute( - 'select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')' - ) - con.execute( - 'select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')' - ) - - # turn off enable_parent option - con.execute( - 'select set_enable_parent(\'range_partitioned\', false)') - con.execute('select set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - - def cmp_full(con1, con2): - """ - Compare selection partitions in plan - and contents in partitioned tables - """ - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', 'only range_partitioned', - 'hash_partitioned', 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute( - plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute( - plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [ - x[0] for x in con1.execute(content_query % table_ref) + node.start() + + # Init two databases: initial and copy + node.psql('postgres', 'create database initial') + node.psql('postgres', 'create database copy') + node.psql('initial', 'create extension pg_pathman') + + # Create and fillin partitioned table in initial database + with node.connect('initial') as con: + + # create and initailly fillin tables + con.execute('create table range_partitioned (i integer not null)') + con.execute( + 'insert into range_partitioned select i from generate_series(1, 500) i' + ) + con.execute('create table 
hash_partitioned (i integer not null)') + con.execute( + 'insert into hash_partitioned select i from generate_series(1, 500) i' + ) + + # partition table keeping data in base table + # enable_parent parameter automatically becames true + con.execute( + "select create_range_partitions('range_partitioned', 'i', 1, 200, partition_data := false)" + ) + con.execute( + "select create_hash_partitions('hash_partitioned', 'i', 5, false)" + ) + + # fillin child tables with remain data + con.execute( + 'insert into range_partitioned select i from generate_series(501, 1000) i' + ) + con.execute( + 'insert into hash_partitioned select i from generate_series(501, 1000) i' + ) + + # set init callback + con.execute(""" + create or replace function init_partition_stub_callback(args jsonb) + returns void as $$ + begin + end + $$ language plpgsql; + """) + con.execute( + "select set_init_callback('range_partitioned', 'init_partition_stub_callback(jsonb)')" + ) + con.execute( + "select set_init_callback('hash_partitioned', 'init_partition_stub_callback(jsonb)')" + ) + + # turn off enable_parent option + con.execute( + "select set_enable_parent('range_partitioned', false)") + con.execute("select set_enable_parent('hash_partitioned', false)") + con.commit() + + # compare strategies + CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) + + def cmp_full(con1, con2): + """ + Compare selection partitions in plan + and contents in partitioned tables + """ + + plan_query = 'explain (costs off, format json) select * from %s' + content_query = 'select * from %s order by i' + table_refs = [ + 'range_partitioned', 'only range_partitioned', + 'hash_partitioned', 'only hash_partitioned' ] - content_copy = [ - x[0] for x in con2.execute(content_query % table_ref) - ] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', - 'alter system set pg_pathman.override_copy to off') - node.psql('copy', - 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, None, [ - node.get_bin_path("pg_dump"), "-p {}".format(node.port), - "initial" - ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, turnon_pathman, [ - node.get_bin_path("pg_dump"), "-p {}".format(node.port), - "--inserts", "initial" - ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, None, [ - node.get_bin_path("pg_dump"), "-p {}".format(node.port), - "--format=custom", "initial" - ], [ - node.get_bin_path("pg_restore"), "-p {}".format(node.port), - "--dbname=copy" - ], cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), - ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen( - pg_restore_params, - stdin=subprocess.PIPE, - stdout=FNULL, - 
stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # validate data - with node.connect('initial') as con1, \ - node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual( - cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" - % dump_restore_cmd) - self.assertNotEqual( - cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" - % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() + for table_ref in table_refs: + plan_initial = con1.execute( + plan_query % table_ref)[0][0][0]['Plan'] + plan_copy = con2.execute( + plan_query % table_ref)[0][0][0]['Plan'] + if ordered(plan_initial) != ordered(plan_copy): + return PLANS_MISMATCH + + content_initial = [ + x[0] for x in con1.execute(content_query % table_ref) + ] + content_copy = [ + x[0] for x in con2.execute(content_query % table_ref) + ] + if content_initial != content_copy: + return CONTENTS_MISMATCH + + return CMP_OK + + def turnoff_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to off') + node.reload() + + def turnon_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to on') + node.psql('copy', 'alter system set pg_pathman.enable to on') + node.psql('initial', + 'alter system set pg_pathman.override_copy to off') + node.psql('copy', + 'alter system set pg_pathman.override_copy to off') + node.reload() + + # Test dump/restore from init database to copy functionality + test_params = [ + (None, None, [ + get_bin_path("pg_dump"), "-p {}".format(node.port), + "initial" + ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via COPY + (turnoff_pathman, turnon_pathman, [ + get_bin_path("pg_dump"), "-p {}".format(node.port), + "--inserts", "initial" + ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via INSERTs + (None, None, [ + get_bin_path("pg_dump"), "-p {}".format(node.port), + "--format=custom", 
"initial" + ], [ + get_bin_path("pg_restore"), "-p {}".format(node.port), + "--dbname=copy" + ], cmp_full), # dump in archive format + ] + + with open(os.devnull, 'w') as fnull: + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + + dump_restore_cmd = " | ".join((' '.join(pg_dump_params), + ' '.join(pg_restore_params))) + + if (preproc != None): + preproc(node) + + # transfer and restore data + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) + stdoutdata, _ = p1.communicate() + p2 = subprocess.Popen( + pg_restore_params, + stdin=subprocess.PIPE, + stdout=fnull, + stderr=fnull) + p2.communicate(input=stdoutdata) + + if (postproc != None): + postproc(node) + + # validate data + with node.connect('initial') as con1, \ + node.connect('copy') as con2: + + # compare plans and contents of initial and copy + cmp_result = cmp_dbs(con1, con2) + self.assertNotEqual( + cmp_result, PLANS_MISMATCH, + "mismatch in plans of select query on partitioned tables under the command: %s" + % dump_restore_cmd) + self.assertNotEqual( + cmp_result, CONTENTS_MISMATCH, + "mismatch in contents of partitioned tables under the command: %s" + % dump_restore_cmd) + + # compare enable_parent flag and callback function + config_params_query = """ + select partrel, enable_parent, init_callback from pathman_config_params + """ + config_params_initial, config_params_copy = {}, {} + for row in con1.execute(config_params_query): + config_params_initial[row[0]] = row[1:] + for row in con2.execute(config_params_query): + config_params_copy[row[0]] = row[1:] + self.assertEqual(config_params_initial, config_params_copy, \ + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + + # compare constraints on each partition + constraints_query = """ + select r.relname, c.conname, c.consrc from + pg_constraint c join pg_class r on c.conrelid=r.oid + where relname similar to '(range|hash)_partitioned_\d+' + """ + constraints_initial, constraints_copy = {}, {} + for row in con1.execute(constraints_query): + constraints_initial[row[0]] = row[1:] + for row in con2.execute(constraints_query): + constraints_copy[row[0]] = row[1:] + self.assertEqual(constraints_initial, constraints_copy, \ + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + + # clear copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') def test_concurrent_detach(self): """ @@ -1141,73 +1088,66 @@ def test_concurrent_detach(self): self.assertTrue( os.path.isfile(insert_pgbench_script), msg="pgbench script with insert timestamp doesn't exist") + self.assertTrue( os.path.isfile(detach_pgbench_script), msg="pgbench script with detach letfmost partition doesn't exist") # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute( - 'create table ts_range_partitioned(ts timestamp not null)') - - # yapf: disable - con0.execute(""" - select create_range_partitions('ts_range_partitioned', - 'ts', - current_timestamp, - interval '%f', - 1) - """ % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - 
init_pgbench.wait()
-
- inserts = node.pgbench(
- stdout=FNULL,
- stderr=subprocess.PIPE,
- options=[
- "-j",
- "%i" % num_insert_workers, "-c",
- "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T",
- "%i" % (test_interval + inserts_advance)
- ])
- time.sleep(inserts_advance)
- detachs = node.pgbench(
- stdout=FNULL,
- stderr=FNULL,
- options=[
- "-D",
- "timeout=%f" % detach_timeout, "-f", detach_pgbench_script,
- "-T",
- "%i" % test_interval
- ])
-
- # Wait for completion of processes
- _, stderrdata = inserts.communicate()
- detachs.wait()
-
- # Obtain error log from inserts process
- self.assertIsNone(
- re.search("ERROR|FATAL|PANIC", str(stderrdata)),
- msg="""
- Race condition between detach and concurrent
- inserts with append partition is expired
- """)
-
- # Stop instance and finish work
- node.stop()
- node.cleanup()
- FNULL.close()
-
+ with self.start_new_pathman_cluster(allow_streaming=False) as node:
+ # Create partitioned table for testing that spawns a new partition every *detach_timeout* sec
+ with node.connect() as con0:
+ con0.begin()
+ con0.execute(
+ 'create table ts_range_partitioned(ts timestamp not null)')
+
+ # yapf: disable
+ con0.execute("""
+ select create_range_partitions('ts_range_partitioned',
+ 'ts',
+ current_timestamp,
+ interval '%f',
+ 1)
+ """ % detach_timeout)
+ con0.commit()
+
+ # Run insert and detach processes in the background
+ with open(os.devnull, 'w') as fnull:
+ # init pgbench's utility tables
+ init_pgbench = node.pgbench(stdout=fnull, stderr=fnull, options=["-i"])
+ init_pgbench.wait()
+
+ inserts = node.pgbench(
+ stdout=fnull,
+ stderr=subprocess.PIPE,
+ options=[
+ "-j",
+ "%i" % num_insert_workers, "-c",
+ "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T",
+ "%i" % (test_interval + inserts_advance)
+ ])
+ time.sleep(inserts_advance)
+ detachs = node.pgbench(
+ stdout=fnull,
+ stderr=fnull,
+ options=[
+ "-D",
+ "timeout=%f" % detach_timeout, "-f", detach_pgbench_script,
+ "-T",
+ "%i" % test_interval
+ ])
+
+ # Wait for completion of processes
+ _, stderrdata = inserts.communicate()
+ detachs.wait()
+
+ # Obtain error log from inserts process
+ self.assertIsNone(
+ re.search("ERROR|FATAL|PANIC", str(stderrdata)),
+ msg="""
+ Race condition between detach and concurrent
+ inserts with partition append was detected
+ """)

if __name__ == "__main__":
unittest.main()

From ce19675e7e935a2f8d2bed3d03cbd089d6d85e72 Mon Sep 17 00:00:00 2001
From: Ildus K
Date: Fri, 15 Sep 2017 16:50:55 +0300
Subject: [PATCH 144/528] Fix formatting of partitioning_test.py

---
 tests/python/partitioning_test.py | 68 +++++++++++++++++--------------
 1 file changed, 38 insertions(+), 30 deletions(-)

diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py
index 2f772041..1425149c 100755
--- a/tests/python/partitioning_test.py
+++ b/tests/python/partitioning_test.py
@@ -46,8 +46,10 @@ def wrapper(*args, **kwargs):

 class Tests(unittest.TestCase):
- def start_new_pathman_cluster(self, name='test',
- allow_streaming=False, test_data=False):
+ def start_new_pathman_cluster(self,
+ name='test',
+ allow_streaming=False,
+ test_data=False):
 node = get_new_node(name)
 node.init(allow_streaming=allow_streaming)
 node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n")
@@ -79,8 +81,7 @@ def catchup_replica(self, master, replica):
 WHERE application_name = '{0}'
 """

- master.poll_query_until('postgres',
- wait_lsn_query.format(replica.name))
+ master.poll_query_until('postgres', wait_lsn_query.format(replica.name))
def test_concurrent(self):
 """ Test concurrent partitioning """

@@ -126,7 +127,7 @@ def test_replication(self):
 replica.psql('postgres', 'explain (costs off) select * from abc'))

 # enable parent and see if it is enabled in replica
- node.psql('postgres', 'select enable_parent(\'abc\'')
+ node.psql('postgres', "select enable_parent('abc')")
 self.catchup_replica(node, replica)

 self.assertEqual(
@@ -139,7 +140,8 @@ def test_replication(self):
 node.execute('postgres', 'select count(*) from abc')[0][0], 300000)

 # check that UPDATE in pathman_config_params invalidates cache
- node.psql('postgres', 'update pathman_config_params set enable_parent = false')
+ node.psql('postgres',
+ 'update pathman_config_params set enable_parent = false')
 self.catchup_replica(node, replica)
 self.assertEqual(
 node.psql('postgres', 'explain (costs off) select * from abc'),
@@ -147,7 +149,8 @@ def test_replication(self):
 self.assertEqual(
 node.psql('postgres', 'select * from abc'),
 replica.psql('postgres', 'select * from abc'))
- self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0)
+ self.assertEqual(
+ node.execute('postgres', 'select count(*) from abc')[0][0], 0)

 def test_locks(self):
 """
@@ -197,15 +200,14 @@ def add_partition(node, flag, query):
 # Start transaction that will create partition
 with node.connect() as con:
 con.begin()
- con.execute('select append_range_partition(\'abc\')')
+ con.execute("select append_range_partition('abc')")

 # Start threads that are supposed to add new partitions and wait some
 # time
 query = (
 "select prepend_range_partition('abc')",
 "select append_range_partition('abc')",
- "select add_range_partition('abc', 500000, 550000)",
- )
+ "select add_range_partition('abc', 500000, 550000)", )
 threads = []
 for i in range(3):
 thread = threading.Thread(
@@ -245,7 +247,7 @@ def test_tablespace(self):

 def check_tablespace(node, tablename, tablespace):
 res = node.execute('postgres',
- 'select get_tablespace(\'{}\')'.format(tablename))
+ "select get_tablespace('{}')".format(tablename))
 if len(res) == 0:
 return False

@@ -253,57 +255,63 @@ def check_tablespace(node, tablename, tablespace):

 with get_new_node('master') as node:
 node.init()
- node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n')
+ node.append_conf('postgresql.conf',
+ "shared_preload_libraries='pg_pathman'\n")
 node.start()
 node.psql('postgres', 'create extension pg_pathman')

 # create tablespace
 path = os.path.join(node.data_dir, 'test_space_location')
 os.mkdir(path)
- node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path))
+ node.psql('postgres',
+ "create tablespace test_space location '{}'".format(path))

 # create table in this tablespace
- node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space')
+ node.psql('postgres',
+ 'create table abc(a serial, b int) tablespace test_space')

 # create three partitions. 
Expect that they will be created in the
 # same tablespace as the parent table
- node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)')
+ node.psql('postgres',
+ "select create_range_partitions('abc', 'a', 1, 10, 3)")
 self.assertTrue(check_tablespace(node, 'abc', 'test_space'))

 # check tablespace for appended partition
- node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')')
+ node.psql('postgres',
+ "select append_range_partition('abc', 'abc_appended')")
 self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space'))

 # check tablespace for prepended partition
 node.psql('postgres',
- 'select prepend_range_partition(\'abc\', \'abc_prepended\')')
+ "select prepend_range_partition('abc', 'abc_prepended')")
 self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space'))

 # check tablespace for added partition
 node.psql('postgres',
- 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')')
+ "select add_range_partition('abc', 41, 51, 'abc_added')")
 self.assertTrue(check_tablespace(node, 'abc_added', 'test_space'))

 # check tablespace for split
 node.psql('postgres',
- 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')')
+ "select split_range_partition('abc_added', 45, 'abc_splitted')")
 self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space'))

 # now let's specify tablespace explicitly
 node.psql(
 'postgres',
- 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')')
+ "select append_range_partition('abc', 'abc_appended_2', 'pg_default')"
+ )
 node.psql(
 'postgres',
- 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')'
+ "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')"
 )
 node.psql(
 'postgres',
- 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')'
+ "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')"
 )
 node.psql(
 'postgres',
- 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')'
+ "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')"
 )

 # yapf: disable
@@ -372,13 +380,13 @@ def test_foreign_table(self):
 b'25|foreign\n')

 # Check that we can successfully insert new data into foreign partition
- master.safe_psql('postgres', 'insert into abc values (26, \'part\')')
+ master.safe_psql('postgres', "insert into abc values (26, 'part')")
 self.assertEqual(
 master.safe_psql('postgres', 'select * from ftable order by id'),
 b'25|foreign\n26|part\n')

 # Testing drop partitions (including foreign partitions)
- master.safe_psql('postgres', 'select drop_partitions(\'abc\')')
+ master.safe_psql('postgres', "select drop_partitions('abc')")

 # HASH partitioning with FDW:
 # - create hash partitioned table in master
@@ -417,7 +425,7 @@ def test_parallel_nodes(self):
 node.init()
 node.append_conf(
 'postgresql.conf',
- 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n')
+ "shared_preload_libraries='pg_pathman, postgres_fdw'\n")
 node.start()

 # Check version of postgres server
@@ -468,7 +476,7 @@ def test_parallel_nodes(self):

 # Check parallel aggregate plan
 test_query = 'select count(*) from range_partitioned where i < 1500'
- plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0]
+ plan = con.execute("select query_plan('%s')" % test_query)[0][0]
 expected = json.loads("""
 [
 {
@@ -532,7 +540,7 @@ def test_parallel_nodes(self):

 # Check simple parallel seq scan plan with limit
 test_query = 'select * from range_partitioned where i < 
1500 limit 5'
- plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0]
+ plan = con.execute("select query_plan('%s')" % test_query)[0][0]
 expected = json.loads("""
 [
 {
@@ -587,7 +595,7 @@ def test_parallel_nodes(self):

 # Check the case when no partition is selected in the resulting plan
 test_query = 'select * from range_partitioned where i < 1'
- plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0]
+ plan = con.execute("select query_plan('%s')" % test_query)[0][0]
 expected = json.loads("""
 [
 {
@@ -869,7 +877,7 @@ def test_pg_dump(self):
 with get_new_node('test') as node:
 node.init()
 node.append_conf('postgresql.conf', """
- shared_preload_libraries=\'pg_pathman\'
+ shared_preload_libraries='pg_pathman'
 pg_pathman.override_copy=false
 """)
 node.start()

From bc4d271cd7d76e89278b3eaf09ab5a860e9577b9 Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Fri, 15 Sep 2017 17:17:39 +0300
Subject: [PATCH 145/528] Fix include error

---
 src/pg_pathman.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/pg_pathman.c b/src/pg_pathman.c
index 704328ba..bc957212 100644
--- a/src/pg_pathman.c
+++ b/src/pg_pathman.c
@@ -23,6 +23,7 @@
 #include "postgres.h"
 #include "access/sysattr.h"
 #include "catalog/pg_type.h"
+#include "compat/relation_tags.h"
 #include "foreign/fdwapi.h"
 #include "miscadmin.h"
 #include "optimizer/clauses.h"

From 713c5f9164e142aae1f257558f2d18ed88af9a67 Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Tue, 19 Sep 2017 14:10:32 +0300
Subject: [PATCH 146/528] Fix compatibility with pg10 (#123)

---
 src/include/compat/pg_compat.h | 11 ++++++++++-
 src/partition_filter.c | 4 +++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h
index 45f1a6c5..22a3d5ff 100644
--- a/src/include/compat/pg_compat.h
+++ b/src/include/compat/pg_compat.h
@@ -59,6 +59,16 @@
 } while (0)
 #endif

+/*
+ * CheckValidResultRel()
+ */
+#if PG_VERSION_NUM >= 100000
+#define CheckValidResultRelCompat(rri, cmd) \
+ CheckValidResultRel((rri), (cmd))
+#elif PG_VERSION_NUM >= 90500
+#define CheckValidResultRelCompat(rri, cmd) \
+ CheckValidResultRel((rri)->ri_RelationDesc, (cmd))
+#endif

 /*
 * BeginCopyFrom()
@@ -590,7 +600,6 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc,
 #endif


-
 /*
 * -------------
 * Common code
diff --git a/src/partition_filter.c b/src/partition_filter.c
index a8cbf5ea..214b926a 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -268,7 +268,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage)

 /* Open relation and check if it is a valid target */
 child_rel = heap_open(partid, NoLock);
- CheckValidResultRel(child_rel, parts_storage->command_type);

 /* Build Var translation list for 'inserted_cols' */
 make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars);
@@ -318,6 +317,9 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage)
 /* ri_ConstraintExprs will be initialized by ExecRelCheck() */
 child_result_rel_info->ri_ConstraintExprs = NULL;

+ /* Check that this partition is a valid result relation */
+ CheckValidResultRelCompat(child_result_rel_info, parts_storage->command_type);
+
 /* Fill the ResultRelInfo holder */
 rri_holder->partid = partid;
 rri_holder->result_rel_info = child_result_rel_info;

From d38e7c93b1bcddedeb2c435bb2a0c62288a805ad Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Wed, 20 Sep 2017 14:29:43 +0300
Subject: [PATCH 147/528] Fix segfault on DELETE .. 
USING with joins of partitioned tables --- expected/pathman_upd_del.out | 12 +++++++++++- sql/pathman_upd_del.sql | 6 +++++- src/hooks.c | 17 +++++++++++++++++ src/pg_pathman.c | 3 +++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 9f590a9f..21f284e6 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -13,6 +13,13 @@ SET enable_seqscan = OFF; /* Temporary table for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + /* Partition table by RANGE */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, @@ -258,7 +265,10 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; DELETE + USING with partitioned table */ +DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; +ERROR: pg_pathman doesn't support DELETE queries with joining of partitioned tables DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index 16d7ebfd..aae6f466 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -21,6 +21,9 @@ SET enable_seqscan = OFF; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + /* Partition table by RANGE */ CREATE TABLE test.range_rel ( @@ -164,7 +167,8 @@ WITH q AS (DELETE FROM test.tmp t DELETE FROM test.tmp USING q; ROLLBACK; - +/* Test special rule for CTE; DELETE + USING with partitioned table */ +DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index abe6face..e5234f4f 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -99,6 +99,23 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (innerrel->reloptkind != RELOPT_BASEREL) return; + /* check if query DELETE FROM .. USING .. 
*/
+ if (root->parse->commandType == CMD_DELETE && jointype == JOIN_INNER)
+ {
+ int x = -1;
+ int count = 0;
+
+ while ((x = bms_next_member(joinrel->relids, x)) >= 0)
+ if (get_pathman_relation_info(root->simple_rte_array[x]->relid))
+ count += 1;
+
+ if (count > 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("pg_pathman doesn't support DELETE queries with "\
+ "joining of partitioned tables")));
+ }
+
 /* We shouldn't process tables with active children */
 if (inner_rte->inh)
 return;
diff --git a/src/pg_pathman.c b/src/pg_pathman.c
index 41090f3f..37a2d3f1 100644
--- a/src/pg_pathman.c
+++ b/src/pg_pathman.c
@@ -1916,6 +1916,9 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti,
 childRTE = root->simple_rte_array[childRTindex];
 childrel = root->simple_rel_array[childRTindex];

+ if (!childrel)
+ elog(ERROR, "could not make access paths to a relation");
+
 #if PG_VERSION_NUM >= 90600
 /*
 * If parallelism is allowable for this query in general and for parent

From a17aeef48df6e19a4bbe16aab6d714a3fc0956e0 Mon Sep 17 00:00:00 2001
From: Sokolov Yura
Date: Fri, 22 Sep 2017 17:40:09 +0300
Subject: [PATCH 148/528] Fix out-of-source build

When included in the PostgreSQL contrib tree with PostgreSQL configured
for an out-of-source build, pg_pathman could not build because the
include path was missing. Fix this by referring to top_srcdir and
subdir when building without USE_PGXS.
---
 Makefile | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/Makefile b/Makefile
index 40738ddf..4a94480e 100644
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,11 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \
 src/compat/pg_compat.o src/compat/relation_tags.o src/compat/rowmarks_fix.o \
 $(WIN32RES)

+ifdef USE_PGXS
 override PG_CPPFLAGS += -I$(CURDIR)/src/include
+else
+override PG_CPPFLAGS += -I$(top_srcdir)/$(subdir)/src/include
+endif

 EXTENSION = pg_pathman

From 03058fb2b0666a4e003ca6e4b623ff70f09f1f40 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Fri, 22 Sep 2017 18:12:51 +0300
Subject: [PATCH 149/528] use pip instead of pip3 in run_tests.sh

---
 run_tests.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/run_tests.sh b/run_tests.sh
index 6622ae39..49c481b9 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -39,8 +39,8 @@ fi
 virtualenv env
 export VIRTUAL_ENV_DISABLE_PROMPT=1
 source env/bin/activate
-pip3 install testgres
-pip3 freeze | grep testgres
+pip install testgres
+pip freeze | grep testgres

 # don't forget to "make clean"
 make USE_PGXS=1 clean

From 4d29720b50b3961bd372763696428572412d6e81 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Fri, 22 Sep 2017 20:16:58 +0300
Subject: [PATCH 150/528] improve checks for DELETE FROM part_table USING part_table

---
 expected/pathman_upd_del.out | 197 +++++++++++++++++++++++++----------
 sql/pathman_upd_del.sql | 68 +++++++++++-
 src/hooks.c | 52 ++++++---
 3 files changed, 242 insertions(+), 75 deletions(-)

diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out
index 21f284e6..147ee2e6 100644
--- a/expected/pathman_upd_del.out
+++ b/expected/pathman_upd_del.out
@@ -10,7 +10,7 @@ CREATE EXTENSION pg_pathman SCHEMA pathman;
 CREATE SCHEMA test;
 SET enable_indexscan = ON;
 SET enable_seqscan = OFF;
-/* Temporary table for JOINs */
+/* Temporary tables for JOINs */
 CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL);
 INSERT INTO test.tmp VALUES (1, 1), (2, 2);
 CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL);
 SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10);
 create_range_partitions
-------------------------
 10
(1 row)

 /* Partition table by RANGE */
 CREATE TABLE test.range_rel (
 id SERIAL PRIMARY KEY,
@@ -35,6 +35,7 @@ SELECT 
pathman.create_range_partitions('test.range_rel', 'dt', 12 (1 row) +VACUUM ANALYZE; /* * Test UPDATE and DELETE */ @@ -111,16 +112,15 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Update on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; UPDATE test.range_rel r SET value = t.value @@ -130,17 +130,16 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.tmp t SET value = r.value FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Update on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(8 rows) + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(7 rows) BEGIN; UPDATE test.tmp t SET value = r.value @@ -150,16 +149,15 @@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Delete on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; DELETE FROM test.range_rel r USING test.tmp t @@ -169,22 +167,118 @@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(8 rows) + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(7 rows) BEGIN; DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; ROLLBACK; +/* DELETE + USING, two 
partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------------ + Delete on tmp r + -> Nested Loop + Join Filter: (a1.id = a2.id) + -> Append + -> Seq Scan on tmp2_1 a2 + -> Seq Scan on tmp2_2 a2_1 + -> Seq Scan on tmp2_3 a2_2 + -> Seq Scan on tmp2_4 a2_3 + -> Seq Scan on tmp2_5 a2_4 + -> Seq Scan on tmp2_6 a2_5 + -> Seq Scan on tmp2_7 a2_6 + -> Seq Scan on tmp2_8 a2_7 + -> Seq Scan on tmp2_9 a2_8 + -> Seq Scan on tmp2_10 a2_9 + -> Materialize + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) +(39 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r @@ -198,10 +292,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (SELECT * FROM test.range_rel r @@ -222,10 +315,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (DELETE FROM test.range_rel r @@ -240,23 +332,21 @@ WITH q AS (DELETE FROM test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id 
RETURNING *) DELETE FROM test.tmp USING q; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Delete on tmp CTE q -> Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(14 rows) +(12 rows) BEGIN; WITH q AS (DELETE FROM test.tmp t @@ -265,9 +355,6 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; -/* Test special rule for CTE; DELETE + USING with partitioned table */ -DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; -ERROR: pg_pathman doesn't support DELETE queries with joining of partitioned tables DROP SCHEMA test CASCADE; NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index aae6f466..bc51f815 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -17,7 +17,7 @@ SET enable_indexscan = ON; SET enable_seqscan = OFF; -/* Temporary table for JOINs */ +/* Temporary tables for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); @@ -39,6 +39,9 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', 12); +VACUUM ANALYZE; + + /* * Test UPDATE and DELETE */ @@ -123,6 +126,66 @@ WHERE r.dt = '2010-01-02' AND r.id = t.id; ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r @@ -167,8 +230,7 @@ WITH q AS (DELETE FROM test.tmp t DELETE FROM test.tmp USING q; ROLLBACK; -/* Test special rule for CTE; DELETE + USING with partitioned table */ -DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/hooks.c 
b/src/hooks.c index e5234f4f..e1f34b8b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -99,23 +99,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (innerrel->reloptkind != RELOPT_BASEREL) return; - /* check if query DELETE FROM .. USING .. */ - if (root->parse->commandType == CMD_DELETE && jointype == JOIN_INNER) - { - int x = -1; - int count = 0; - - while ((x = bms_next_member(joinrel->relids, x)) >= 0) - if (get_pathman_relation_info(root->simple_rte_array[x]->relid)) - count += 1; - - if (count > 1) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("pg_pathman doesn't support DELETE queries with "\ - "joining of partitioned tables"))); - } - /* We shouldn't process tables with active children */ if (inner_rte->inh) return; @@ -129,6 +112,41 @@ pathman_join_pathlist_hook(PlannerInfo *root, !(inner_prel = get_pathman_relation_info(inner_rte->relid))) return; + /* + * Check if query is: + * 1) UPDATE part_table SET = .. FROM part_table. + * 2) DELETE FROM part_table USING part_table. + * + * Either outerrel or innerrel may be a result relation. + */ + if ((root->parse->resultRelation == outerrel->relid || + root->parse->resultRelation == innerrel->relid) && + (root->parse->commandType == CMD_UPDATE || + root->parse->commandType == CMD_DELETE)) + { + int rti = -1, + count = 0; + + /* Inner relation must be partitioned */ + Assert(inner_prel); + + /* Check each base rel of outer relation */ + while ((rti = bms_next_member(outerrel->relids, rti)) >= 0) + { + Oid outer_baserel = root->simple_rte_array[rti]->relid; + + /* Is it partitioned? */ + if (get_pathman_relation_info(outer_baserel)) + count++; + } + + if (count > 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("DELETE and UPDATE queries with a join " + "of partitioned tables are not supported"))); + } + /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, inner_rte)) From a40fc5aa6acdc652772cb2ffe8a6b4a5a3d1501b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Sep 2017 20:21:41 +0300 Subject: [PATCH 151/528] use pip instead of pip3 in pg-travis-test.sh --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 5c0ec44e..bdae1541 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -97,7 +97,7 @@ virtualenv /tmp/envs/pg_pathman source /tmp/envs/pg_pathman/bin/activate # install pip packages -pip3 install $pip_packages +pip install $pip_packages # run python tests make USE_PGXS=1 PG_CONFIG=$config_path python_tests || status=$? 
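The pip and interpreter fixes above, together with the Makefile change in
the next patch, all converge on a single test entry point. For reference, a
minimal local equivalent of that CI sequence might look like the sketch
below; the PG_CONFIG path is an assumption, substitute the pg_config of the
server under test:

    virtualenv /tmp/envs/pg_pathman
    source /tmp/envs/pg_pathman/bin/activate
    pip install testgres
    # build and install the extension, then run the Python test suite
    make USE_PGXS=1 PG_CONFIG=/usr/lib/postgresql/10/bin/pg_config install
    make USE_PGXS=1 PG_CONFIG=/usr/lib/postgresql/10/bin/pg_config python_tests
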
From 64ebf7f690629c14a27ec11145562113187f0fca Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Fri, 22 Sep 2017 20:29:12 +0300
Subject: [PATCH 152/528] use python instead of python3 in tests/python/Makefile

---
 tests/python/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/python/Makefile b/tests/python/Makefile
index cb2bc50d..bb548928 100644
--- a/tests/python/Makefile
+++ b/tests/python/Makefile
@@ -1,2 +1,2 @@
 partitioning_tests:
- python3 -m unittest partitioning_test.py
+ python -m unittest partitioning_test.py

From 4bdc79290086cf51a866846a6685f4deb0f43c36 Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Mon, 25 Sep 2017 12:29:03 +0300
Subject: [PATCH 153/528] Update make_images.py

---
 make_images.py | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/make_images.py b/make_images.py
index 4de7d40e..9c9b6e43 100755
--- a/make_images.py
+++ b/make_images.py
@@ -15,30 +15,33 @@

 '''
 How to create this patch:
- 1) put `import ipdb; ipdb.set_trace()` in make_alpine_image, after `open(patch_name)..`
- 2) run the script
- 3) in temporary folder run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1 && diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch`
- 4) contents of cassert.patch put to variable below
- 5) change Dockerfile.1 to Dockerfile in text, change `\` symbols to `\\`
+ * put `import ipdb; ipdb.set_trace()` in make_alpine_image, just before `open(patch_name)..`
+ * run the script
+ * in the temporary folder, run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1`
+ * uncomment --enable-debug, add --enable-cassert, add `CFLAGS="-g3 -O0"` before ./configure
+ * run `diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch`
+ * put the contents of cassert.patch into the variable below
+ * in the text, change Dockerfile.1 to Dockerfile and change `\` symbols to `\\`
 '''

 ALPINE_PATCH = b'''
---- Dockerfile 2017-07-27 14:54:10.403971867 +0300
-+++ Dockerfile 2017-07-27 14:56:01.132503106 +0300
-@@ -79,7 +79,7 @@
+--- Dockerfile 2017-09-25 12:01:24.597813507 +0300
++++ Dockerfile 2017-09-25 12:09:06.104059704 +0300
+@@ -79,15 +79,15 @@
 && wget -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' \\
 # configure options taken from:
 # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5
 - && ./configure \\
-+ && CFLAGS="-O0" ./configure \\
++ && CFLAGS="-g3 -O0" ./configure \\
 --build="$gnuArch" \\
 # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'"
 # --enable-nls \\
@@ -87,7 +87,7 @@
 --enable-integer-datetimes \\
 --enable-thread-safety \\
 --enable-tap-tests \\
-# skip debugging info -- we want tiny size instead
-# --enable-debug \\
+ --enable-debug \\
++ --enable-cassert \\
 --disable-rpath \\
 --with-uuid=e2fs \\
 --with-gnu-ld \\

From aee0875c1bff6cc893a01f433d70214148a7bfdd Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Mon, 25 Sep 2017 17:31:01 +0300
Subject: [PATCH 154/528] Deny split and merge for subpartitions

---
 expected/pathman_subpartitions.out | 40 +++++++++++++++++++++++++++++-
 range.sql | 23 +++++++++++------
 sql/pathman_subpartitions.sql | 8 ++++++
 src/init.c | 8 +++---
 src/pl_range_funcs.c | 15 +++++++++--
 5 files changed, 80 insertions(+), 14 deletions(-)

diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out
index 
54e93e9e..36bf0919 100644
--- a/expected/pathman_subpartitions.out
+++ b/expected/pathman_subpartitions.out
@@ -300,8 +300,46 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti
 subpartitions.abc_2_3 | 125 | 125
(10 rows)

+/* split_range_partition */
+SELECT split_range_partition('subpartitions.abc_2', 150);
+ERROR: cannot split partition that has children
+SELECT split_range_partition('subpartitions.abc_2_2', 75);
+ split_range_partition
+-----------------------
+ {50,100}
+(1 row)
+
+SELECT subpartitions.partitions_tree('subpartitions.abc');
+ partitions_tree
+--------------------------
+ subpartitions.abc
+ subpartitions.abc_1
+ subpartitions.abc_1_1
+ subpartitions.abc_1_2
+ subpartitions.abc_2
+ subpartitions.abc_2_1
+ subpartitions.abc_2_2
+ subpartitions.abc_2_4
+ subpartitions.abc_2_3
+(9 rows)

+/* merge_range_partitions */
+SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3');
+ append_range_partition
+------------------------
+ subpartitions.abc_3
+(1 row)

+select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3');
+ERROR: cannot merge partitions
+select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2');
+ merge_range_partitions
+------------------------
+
+(1 row)

 DROP TABLE subpartitions.abc CASCADE;
-NOTICE: drop cascades to 10 other objects
+NOTICE: drop cascades to 11 other objects
 DROP SCHEMA subpartitions CASCADE;
 NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text)
 DROP EXTENSION pg_pathman;
diff --git a/range.sql b/range.sql
index 67cf3d7a..c03a04d5 100644
--- a/range.sql
+++ b/range.sql
@@ -313,13 +313,14 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition(
 OUT p_range ANYARRAY)
RETURNS ANYARRAY AS
$$
DECLARE
- parent_relid REGCLASS;
- part_type INTEGER;
- part_expr TEXT;
- part_expr_type REGTYPE;
- check_name TEXT;
- check_cond TEXT;
- new_partition TEXT;
+ parent_relid REGCLASS;
+ inhparent REGCLASS;
+ part_type INTEGER;
+ part_expr TEXT;
+ part_expr_type REGTYPE;
+ check_name TEXT;
+ check_cond TEXT;
+ new_partition TEXT;
BEGIN
 parent_relid = @extschema@.get_parent_of_partition(partition_relid);

@@ -327,6 +328,14 @@ BEGIN
 PERFORM @extschema@.validate_relname(parent_relid);
 PERFORM @extschema@.validate_relname(partition_relid);

+ EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1')
+ USING partition_relid
+ INTO inhparent;
+
+ IF inhparent IS NOT NULL THEN
+ RAISE EXCEPTION 'cannot split partition that has children';
+ END IF;
+
 /* Acquire lock on parent */
 PERFORM @extschema@.prevent_part_modification(parent_relid);

diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql
index aefe728d..e5ed87c0 100644
--- a/sql/pathman_subpartitions.sql
+++ b/sql/pathman_subpartitions.sql
@@ -97,7 +97,15 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio
 UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75;
 SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */

+/* split_range_partition */
+SELECT split_range_partition('subpartitions.abc_2', 150);
+SELECT split_range_partition('subpartitions.abc_2_2', 75);
+SELECT subpartitions.partitions_tree('subpartitions.abc');

+/* merge_range_partitions */
+SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3');
+select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3');
+select merge_range_partitions('subpartitions.abc_2_1', 
'subpartitions.abc_2_2');

DROP TABLE subpartitions.abc CASCADE;
DROP SCHEMA subpartitions CASCADE;
diff --git a/src/init.c b/src/init.c
index 13487f7e..c556c485 100644
--- a/src/init.c
+++ b/src/init.c
@@ -406,7 +406,7 @@ fini_local_cache(void)
 * find_inheritance_children
 *
 * Returns an array containing the OIDs of all relations which
- * inherit *directly* from the relation with OID 'parentrelId'.
+ * inherit *directly* from the relation with OID 'parent_relid'.
 *
 * The specified lock type is acquired on each child relation (but not on the
 * given rel; caller should already have locked it). If lockmode is NoLock
@@ -416,7 +416,7 @@ fini_local_cache(void)
 * borrowed from pg_inherits.c
 */
find_children_status
-find_inheritance_children_array(Oid parentrelId,
+find_inheritance_children_array(Oid parent_relid,
 LOCKMODE lockmode,
 bool nowait,
 uint32 *children_size, /* ret value #1 */
@@ -444,7 +444,7 @@ find_inheritance_children_array(Oid parent_relid,
 * Can skip the scan if pg_class shows the
 * relation has never had a subclass.
 */
- if (!has_subclass(parentrelId))
+ if (!has_subclass(parent_relid))
 return FCS_NO_CHILDREN;

 /*
@@ -459,7 +459,7 @@ find_inheritance_children_array(Oid parent_relid,
 ScanKeyInit(&key[0],
 Anum_pg_inherits_inhparent,
 BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(parentrelId));
+ ObjectIdGetDatum(parent_relid));

 scan = systable_beginscan(relation, InheritsParentIndexId, true,
 NULL, 1, key);
diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c
index 716c5d17..805af65c 100644
--- a/src/pl_range_funcs.c
+++ b/src/pl_range_funcs.c
@@ -16,9 +16,10 @@
 #include "xact_handling.h"

 #include "access/xact.h"
+#include "catalog/heap.h"
 #include "catalog/namespace.h"
+#include "catalog/pg_inherits_fn.h"
 #include "catalog/pg_type.h"
-#include "catalog/heap.h"
 #include "commands/tablecmds.h"
 #include "executor/spi.h"
 #include "nodes/nodeFuncs.h"
@@ -636,7 +637,17 @@ merge_range_partitions(PG_FUNCTION_ARGS)
 /* Extract partition Oids from array */
 partitions = palloc(sizeof(Oid) * nparts);
 for (i = 0; i < nparts; i++)
- partitions[i] = DatumGetObjectId(datums[i]);
+ {
+ Oid partition_relid;
+ partition_relid = DatumGetObjectId(datums[i]);
+
+ /* check that it has no subpartitions */
+ if (has_subclass(partition_relid))
+ ereport(ERROR, (errmsg("cannot merge partitions"),
+ errdetail("at least one of the specified partitions has children")));
+
+ partitions[i] = partition_relid;
+ }

 if (nparts < 2)
 ereport(ERROR, (errmsg("cannot merge partitions"),

From 533b00b7db7cfaf1bafdfd409b3cf8b548912f6f Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 26 Sep 2017 14:27:02 +0300
Subject: [PATCH 155/528] show alias definitions for python and pip

---
 travis/pg-travis-test.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh
index bdae1541..db1feb3e 100755
--- a/travis/pg-travis-test.sh
+++ b/travis/pg-travis-test.sh
@@ -95,6 +95,8 @@ set +u
 # create virtual environment and activate it
 virtualenv /tmp/envs/pg_pathman
 source /tmp/envs/pg_pathman/bin/activate
+type python
+type pip

 # install pip packages
 pip install $pip_packages

From 9949eeee35018fd3b00a7c335373174c0c572f4a Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 26 Sep 2017 15:02:22 +0300
Subject: [PATCH 156/528] use python3 for tests

---
 travis/pg-travis-test.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/travis/pg-travis-test.sh 
@@ -93,7 +93,7 @@ if test -f regression.diffs; then cat regression.diffs; fi set +u # create virtual environment and activate it -virtualenv /tmp/envs/pg_pathman +virtualenv /tmp/envs/pg_pathman --python=python3 source /tmp/envs/pg_pathman/bin/activate type python type pip From ef1e4440b3e6451222a4121d8216a869e5baae7b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Sep 2017 16:19:35 +0300 Subject: [PATCH 157/528] fix upd_del tests for 9.5 --- expected/pathman_upd_del_1.out | 205 +++++++++++++++++++++++---------- 1 file changed, 143 insertions(+), 62 deletions(-) diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index a019285b..a1eeda7f 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -10,9 +10,16 @@ CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; SET enable_indexscan = ON; SET enable_seqscan = OFF; -/* Temporary table for JOINs */ +/* Temporary tables for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + /* Partition table by RANGE */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, @@ -28,6 +35,7 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', 12 (1 row) +VACUUM ANALYZE; /* * Test UPDATE and DELETE */ @@ -104,16 +112,15 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Update on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; UPDATE test.range_rel r SET value = t.value @@ -123,19 +130,21 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.tmp t SET value = r.value FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Update on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_pkey on range_rel r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) - -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(10 rows) + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) BEGIN; UPDATE test.tmp t SET value = r.value @@ -145,16 +154,15 
@@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Delete on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; DELETE FROM test.range_rel r USING test.tmp t @@ -164,24 +172,98 @@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_pkey on range_rel r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) - -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(10 rows) + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) BEGIN; DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +--------------------------------------------- + Delete on tmp r + -> Merge Join + Merge Cond: (a1.id = a2.id) + -> Merge Join + Merge Cond: (r.id = a1.id) + -> Sort + Sort Key: r.id + -> Seq Scan on tmp r + -> Sort + Sort Key: a1.id + -> Seq Scan on tmp2 a1 + -> Sort + Sort Key: a2.id + -> Seq Scan on tmp2 a2 +(14 rows) + +BEGIN; +DELETE FROM test.tmp r +USING 
(SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r @@ -195,10 +277,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (SELECT * FROM test.range_rel r @@ -219,10 +300,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (DELETE FROM test.range_rel r @@ -237,25 +317,26 @@ WITH q AS (DELETE FROM test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id RETURNING *) DELETE FROM test.tmp USING q; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Delete on tmp CTE q -> Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_pkey on range_rel r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) - -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(16 rows) +(17 rows) BEGIN; WITH q AS (DELETE FROM test.tmp t @@ -265,6 +346,6 @@ WITH q AS (DELETE FROM test.tmp t DELETE FROM test.tmp USING q; ROLLBACK; DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; From cfcb53da159a8dfe1da0dc55719487d7524020eb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Sep 2017 17:47:35 +0300 Subject: [PATCH 158/528] use testgres==0.4.0 for master branch --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 97fa5ea9..be0e645e 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -7,7 +7,7 @@ sudo apt-get update # required packages apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev build-essential" -pip_packages="testgres" +pip_packages="testgres==0.4.0" # exit code status=0 From 280c26a67125944e398bec7966c3a5c852a5f999 Mon Sep 
17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 27 Sep 2017 16:29:10 +0300 Subject: [PATCH 159/528] Add more checks and fixes for subpartitions --- init.sql | 30 +++++++++++++ range.sql | 84 ++++++++++++++++++++++++++++++++-- sql/pathman_subpartitions.sql | 9 ++-- src/include/relation_info.h | 4 ++ src/pl_funcs.c | 44 ++++++++++++++++++ src/relation_info.c | 85 +++++++++++++++++++++++++++++++++++ 6 files changed, 250 insertions(+), 6 deletions(-) diff --git a/init.sql b/init.sql index 96c537d3..5dd808ec 100644 --- a/init.sql +++ b/init.sql @@ -862,6 +862,36 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; +/* + * Get parent of pg_pathman's partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.is_equal_to_partitioning_expression( + parent_relid REGCLASS, + expression TEXT, + value_type OID) +RETURNS BOOL AS 'pg_pathman', 'is_equal_to_partitioning_expression_pl' +LANGUAGE C STRICT; + +/* + * Get lower bound of a partitioned relation + * bound_value is used to determine the type of bound + */ +CREATE OR REPLACE FUNCTION @extschema@.get_lower_bound( + relid REGCLASS, + bound_value ANYELEMENT +) +RETURNS ANYELEMENT AS 'pg_pathman', 'get_lower_bound_pl' +LANGUAGE C STRICT; + +/* + * Get upper bound of a partition + */ +CREATE OR REPLACE FUNCTION @extschema@.get_upper_bound( + relid REGCLASS, + bound_value ANYELEMENT +) +RETURNS ANYELEMENT AS 'pg_pathman', 'get_upper_bound_pl' +LANGUAGE C STRICT; /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. diff --git a/range.sql b/range.sql index c03a04d5..77b6c4de 100644 --- a/range.sql +++ b/range.sql @@ -158,24 +158,47 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE + relid REGCLASS; rows_count BIGINT; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; + lower_bound start_value%TYPE = NULL; + upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; - + part_type INT4; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + /* + * Check that we're trying to make subpartitions. + * If expressions are same then we set and use upper bound. + * We change start_value if it's greater than lower bound. 
+ */ + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + part_type := get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, expression, pg_typeof(start_value)) + THEN + lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); + upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); + IF lower_bound != start_value THEN + start_value := lower_bound; + RAISE NOTICE '"start_value" was set to %', start_value; + END IF; + END IF; + END IF; + IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; END IF; /* Try to determine partitions count if not set */ - IF p_count IS NULL THEN + IF p_count IS NULL OR (relid IS NOT NULL AND p_count = 0) THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; @@ -189,6 +212,7 @@ BEGIN p_count := 0; WHILE cur_value <= max_value + OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; @@ -205,6 +229,20 @@ BEGIN FOR i IN 1..p_count LOOP end_value := end_value + p_interval; + IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN + part_count := i; + IF end_value > upper_bound THEN + RAISE WARNING '"p_interval" is not multiple of range (%, %)', + start_value, end_value; + END IF; + IF p_count != part_count THEN + p_count := part_count; + RAISE NOTICE '"p_count" was set %', p_count; + END IF; + + /* we got our partitions count */ + EXIT; + END IF; END LOOP; /* check boundaries */ @@ -460,6 +498,26 @@ BEGIN END $$ LANGUAGE plpgsql; + +/* + * NOTE: we need this function just to determine the type + * of "upper_bound" var + */ +CREATE OR REPLACE FUNCTION @extschema@.check_against_upper_bound_internal( + relid REGCLASS, + bound_value ANYELEMENT, + error_message TEXT) +RETURNS VOID AS $$ +DECLARE + upper_bound bound_value%TYPE; +BEGIN + upper_bound := get_upper_bound(relid, bound_value); + IF bound_value > upper_bound THEN + RAISE EXCEPTION '%', error_message; + END IF; +END +$$ LANGUAGE plpgsql; + /* * Spawn logic for append_partition(). We have to * separate this in order to pass the 'p_range'. 
@@ -475,10 +533,12 @@ CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( tablespace TEXT DEFAULT NULL) RETURNS TEXT AS $$ DECLARE + relid REGCLASS; part_expr_type REGTYPE; part_name TEXT; v_args_format TEXT; - + part_expr TEXT; + part_type INTEGER; BEGIN IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN RAISE EXCEPTION 'cannot append to empty partitions set'; @@ -496,6 +556,24 @@ BEGIN RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; END IF; + /* + * In case a user has used same expression on two levels, we need to check + * that we've not reached upper bound of higher partitioned table + */ + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid + INTO part_expr; + + part_type := get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, part_expr, part_expr_type) + THEN + PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, + p_range[2], 'reached upper bound in the current level of subpartitions'); + END IF; + END IF; + IF @extschema@.is_date_type(p_atttype) THEN v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index e5ed87c0..91abd021 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -82,8 +82,8 @@ SET pg_pathman.enable_partitionrouter = ON; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); -SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); -SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ @@ -103,10 +103,13 @@ SELECT split_range_partition('subpartitions.abc_2_2', 75); SELECT subpartitions.partitions_tree('subpartitions.abc'); /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); +/* create subpartitions but use same expression */ +SELECT create_range_partitions('subpartitions.abc_3', 'a', 150, 50, 2); + DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index b5ac6877..0c5428ba 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -296,6 +296,8 @@ Datum cook_partitioning_expression(const Oid relid, char *canonicalize_partitioning_expression(const Oid relid, const char *expr_cstr); +bool is_equal_to_partitioning_expression(Oid relid, char *expression, + Oid value_type); /* Global invalidation routines */ void delay_pathman_shutdown(void); @@ -312,6 +314,8 @@ Oid get_parent_of_partition(Oid partition, PartParentSearch *status); void forget_bounds_of_partition(Oid partition); PartBoundInfo 
*get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Datum get_lower_bound(Oid parent_relid, Oid value_type); +Datum get_upper_bound(Oid relid, Oid value_type); /* PartType wrappers */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 1b6cf9c2..d8e48540 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -72,6 +72,10 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( pathman_version ); +PG_FUNCTION_INFO_V1( get_lower_bound_pl ); +PG_FUNCTION_INFO_V1( get_upper_bound_pl ); +PG_FUNCTION_INFO_V1( is_equal_to_partitioning_expression_pl ); + /* User context for function show_partition_list_internal() */ typedef struct @@ -145,6 +149,46 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } +/* + * Get parent of a specified partition. + */ +Datum +is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) +{ + bool result; + Oid parent_relid = PG_GETARG_OID(0); + char *expr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + Oid value_type = PG_GETARG_OID(2); + + result = is_equal_to_partitioning_expression(parent_relid, expr, + value_type); + PG_RETURN_BOOL(result); +} + +/* + * Get min bound value for parent relation + */ +Datum +get_lower_bound_pl(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + PG_RETURN_POINTER(get_lower_bound(relid, value_type)); +} + +/* + * Get min bound value for parent relation + */ +Datum +get_upper_bound_pl(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + PG_RETURN_POINTER(get_upper_bound(relid, value_type)); +} + /* * Extract basic type of a domain. */ diff --git a/src/relation_info.c b/src/relation_info.c index 2e0ce598..0b29ac18 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1040,6 +1040,91 @@ get_parent_of_partition(Oid partition, PartParentSearch *status) return get_parent_of_partition_internal(partition, status, HASH_FIND); } +/* Check that expression is equal to expression of some partitioned table */ +bool +is_equal_to_partitioning_expression(Oid relid, char *expression, + Oid value_type) +{ + const PartRelationInfo *prel; + char *cexpr; + Oid expr_type; + + /* + * Cook and get a canonicalized expression, + * we don't need a result of the cooking + */ + cook_partitioning_expression(relid, expression, &expr_type); + cexpr = canonicalize_partitioning_expression(relid, expression); + + prel = get_pathman_relation_info(relid); + + /* caller should have been check it already */ + Assert(prel != NULL); + + return (expr_type == value_type) && + (strcmp(cexpr, prel->expr_cstr) == 0); +} + +/* Get lower bound of a partition */ +Datum +get_lower_bound(Oid relid, Oid value_type) +{ + Oid parent_relid; + Datum result; + const PartRelationInfo *prel; + PartBoundInfo *pbin; + PartParentSearch parent_search; + + parent_relid = get_parent_of_partition(relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name_or_relid(relid)); + + prel = get_pathman_relation_info(parent_relid); + Assert(prel && prel->parttype == PT_RANGE); + pbin = get_bounds_of_partition(relid, prel); + Assert(prel != NULL); + + if (IsInfinite(&pbin->range_min)) + return PointerGetDatum(NULL); + + result = BoundGetValue(&pbin->range_min); + if (value_type != prel->ev_type) + result = perform_type_cast(result, prel->ev_type, value_type, NULL); + + return result; +} + +/* Get upper bound of a partition */ 
+Datum +get_upper_bound(Oid relid, Oid value_type) +{ + Oid parent_relid; + Datum result; + const PartRelationInfo *prel; + PartBoundInfo *pbin; + PartParentSearch parent_search; + + parent_relid = get_parent_of_partition(relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name_or_relid(relid)); + + prel = get_pathman_relation_info(parent_relid); + Assert(prel && prel->parttype == PT_RANGE); + pbin = get_bounds_of_partition(relid, prel); + Assert(prel != NULL); + + if (IsInfinite(&pbin->range_max)) + return PointerGetDatum(NULL); + + result = BoundGetValue(&pbin->range_max); + if (value_type != prel->ev_type) + result = perform_type_cast(result, prel->ev_type, value_type, NULL); + + return result; +} + /* * Get [and remove] "partition+parent" pair from cache, * also check syscache if 'status' is provided. From 89622778f2b9b5af4d5d707e4593eaf13ff7d9a0 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 2 Oct 2017 18:19:07 +0300 Subject: [PATCH 160/528] Add more subpartitions fixes and tests --- expected/pathman_basic.out | 2 +- expected/pathman_expressions.out | 4 +- expected/pathman_subpartitions.out | 149 +++++++++++++++++++++++++++- range.sql | 151 ++++++++++++++++++++++++----- sql/pathman_subpartitions.sql | 37 ++++++- src/relation_info.c | 2 +- 6 files changed, 312 insertions(+), 33 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index f809eebc..de3bf727 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -147,7 +147,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM \set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index c0f4b0e9..9e19d217 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -371,7 +371,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); @@ -382,7 +382,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary 
WHERE val = 1; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 36bf0919..3a7d4706 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -224,13 +224,13 @@ SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); 2 (1 row) -SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ create_range_partitions ------------------------- 2 (1 row) -SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ create_range_partitions ------------------------- 2 @@ -324,7 +324,7 @@ SELECT subpartitions.partitions_tree('subpartitions.abc'); (9 rows) /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ append_range_partition ------------------------ subpartitions.abc_3 @@ -340,6 +340,149 @@ select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 11 other objects +/* subpartitions on same expressions */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ + create_range_partitions +------------------------- + 9 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ +WARNING: "start_value" was set to 100 + create_range_partitions +------------------------- + 8 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ +WARNING: "p_interval" is not multiple of range (200, 310) +NOTICE: "p_count" was limited to 10 + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ +ERROR: Bounds should start from 300 +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ +ERROR: Lower bound of rightmost partition should be less than 400 +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ + create_range_partitions +------------------------- + 2 +(1 row) + +\d+ subpartitions.abc_1 + Table "subpartitions.abc_1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | not null | | plain | | +Check constraints: + "pathman_abc_1_check" CHECK (a >= 0 AND a < 100) +Inherits: subpartitions.abc +Child tables: subpartitions.abc_1_1, + subpartitions.abc_1_2, + subpartitions.abc_1_3, + subpartitions.abc_1_4, + subpartitions.abc_1_5, + subpartitions.abc_1_6, + subpartitions.abc_1_7, + subpartitions.abc_1_8, + subpartitions.abc_1_9 + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+------------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | 
a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | a | 200 | 300 + subpartitions.abc | subpartitions.abc_4 | 2 | a | 300 | 400 + subpartitions.abc_1 | subpartitions.abc_1_1 | 2 | a | 0 | 11 + subpartitions.abc_1 | subpartitions.abc_1_2 | 2 | a | 11 | 22 + subpartitions.abc_1 | subpartitions.abc_1_3 | 2 | a | 22 | 33 + subpartitions.abc_1 | subpartitions.abc_1_4 | 2 | a | 33 | 44 + subpartitions.abc_1 | subpartitions.abc_1_5 | 2 | a | 44 | 55 + subpartitions.abc_1 | subpartitions.abc_1_6 | 2 | a | 55 | 66 + subpartitions.abc_1 | subpartitions.abc_1_7 | 2 | a | 66 | 77 + subpartitions.abc_1 | subpartitions.abc_1_8 | 2 | a | 77 | 88 + subpartitions.abc_1 | subpartitions.abc_1_9 | 2 | a | 88 | 99 + subpartitions.abc_2 | subpartitions.abc_2_1 | 2 | a | 100 | 111 + subpartitions.abc_2 | subpartitions.abc_2_2 | 2 | a | 111 | 122 + subpartitions.abc_2 | subpartitions.abc_2_3 | 2 | a | 122 | 133 + subpartitions.abc_2 | subpartitions.abc_2_4 | 2 | a | 133 | 144 + subpartitions.abc_2 | subpartitions.abc_2_5 | 2 | a | 144 | 155 + subpartitions.abc_2 | subpartitions.abc_2_6 | 2 | a | 155 | 166 + subpartitions.abc_2 | subpartitions.abc_2_7 | 2 | a | 166 | 177 + subpartitions.abc_2 | subpartitions.abc_2_8 | 2 | a | 177 | 188 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | a | 200 | 211 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | a | 211 | 222 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | a | 222 | 233 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | a | 233 | 244 + subpartitions.abc_3 | subpartitions.abc_3_5 | 2 | a | 244 | 255 + subpartitions.abc_3 | subpartitions.abc_3_6 | 2 | a | 255 | 266 + subpartitions.abc_3 | subpartitions.abc_3_7 | 2 | a | 266 | 277 + subpartitions.abc_3 | subpartitions.abc_3_8 | 2 | a | 277 | 288 + subpartitions.abc_3 | subpartitions.abc_3_9 | 2 | a | 288 | 299 + subpartitions.abc_3 | subpartitions.abc_3_10 | 2 | a | 299 | 310 + subpartitions.abc_4 | subpartitions.abc_4_1 | 2 | a | 300 | 350 + subpartitions.abc_4 | subpartitions.abc_4_2 | 2 | a | 350 | 450 +(33 rows) + +SELECT append_range_partition('subpartitions.abc_1'::regclass); + append_range_partition +------------------------ + subpartitions.abc_1_10 +(1 row) + +SELECT append_range_partition('subpartitions.abc_1'::regclass); +ERROR: reached upper bound in the current level of subpartitions +DROP TABLE subpartitions.abc_1_10; +/* detach_range_partition */ +SELECt detach_range_partition('subpartitions.abc_1'); +ERROR: could not detach partition if it has children +/* attach_range_partition */ +CREATE TABLE subpartitions.abc_c(LIKE subpartitions.abc_1 INCLUDING ALL); +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 98, 110); /* fail */ +ERROR: specified range [98, 110) overlaps with existing partitions +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ +ERROR: "start value" exceeds upper bound of the current level of subpartitions +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 110); /* ok */ + attach_range_partition +------------------------ + subpartitions.abc_c +(1 row) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 39 other objects +/* subpartitions on same expression but dates */ +CREATE TABLE subpartitions.abc(a DATE NOT NULL); +INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 
month'::INTERVAL); + create_range_partitions +------------------------- + 6 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, + '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ +WARNING: "start_value" was set to 10-02-2017 +WARNING: "p_interval" is not multiple of range (10-02-2017, 11-03-2017) +NOTICE: "p_count" was limited to 1 + create_range_partitions +------------------------- + 1 +(1 row) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects DROP SCHEMA subpartitions CASCADE; NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) DROP EXTENSION pg_pathman; diff --git a/range.sql b/range.sql index 77b6c4de..fa72df8d 100644 --- a/range.sql +++ b/range.sql @@ -46,6 +46,29 @@ BEGIN END $$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION @extschema@.has_parent_partitioned_by_expression( + parent_relid REGCLASS, + expression TEXT, + expr_type REGTYPE) +RETURNS BOOL AS $$ +DECLARE + relid REGCLASS; + part_type INTEGER; +BEGIN + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + part_type := @extschema@.get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, expression, expr_type) + THEN + RETURN TRUE; + END IF; + END IF; + + RETURN FALSE; +END +$$ LANGUAGE plpgsql; + /* * Creates RANGE partitions for specified relation based on datetime attribute */ @@ -63,14 +86,33 @@ DECLARE max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; + lower_bound start_value%TYPE = NULL; + upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; - BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + value_type := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * Check that we're trying to make subpartitions. + * If expressions are same then we set and use upper bound. + * We change start_value if it's greater than lower bound. 
+ */ + IF @extschema@.has_parent_partitioned_by_expression(parent_relid, + expression, value_type) + THEN + lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); + upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); + IF lower_bound != start_value THEN + start_value := lower_bound; + RAISE WARNING '"start_value" was set to %', start_value; + END IF; + END IF; + IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; END IF; @@ -86,14 +128,13 @@ BEGIN p_count := 0; WHILE cur_value <= max_value + OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; - value_type := @extschema@.get_base_type(pg_typeof(start_value)); - /* * In case when user doesn't want to automatically create partitions * and specifies partition count as 0 then do not check boundaries @@ -104,6 +145,20 @@ BEGIN FOR i IN 1..p_count LOOP end_value := end_value + p_interval; + IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN + part_count := i; + IF end_value > upper_bound THEN + RAISE WARNING '"p_interval" is not multiple of range (%, %)', + start_value, end_value; + END IF; + IF p_count != part_count THEN + p_count := part_count; + RAISE NOTICE '"p_count" was limited to %', p_count; + END IF; + + /* we got our partitions count */ + EXIT; + END IF; END LOOP; /* Check boundaries */ @@ -158,7 +213,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE - relid REGCLASS; + value_type REGTYPE; rows_count BIGINT; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; @@ -167,29 +222,26 @@ DECLARE upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; - part_type INT4; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + value_type := @extschema@.get_base_type(pg_typeof(start_value)); + /* * Check that we're trying to make subpartitions. * If expressions are same then we set and use upper bound. * We change start_value if it's greater than lower bound. 
*/ - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - part_type := get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, expression, pg_typeof(start_value)) - THEN - lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); - upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); - IF lower_bound != start_value THEN - start_value := lower_bound; - RAISE NOTICE '"start_value" was set to %', start_value; - END IF; + IF @extschema@.has_parent_partitioned_by_expression(parent_relid, + expression, value_type) + THEN + lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); + upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); + IF lower_bound != start_value THEN + start_value := lower_bound; + RAISE WARNING '"start_value" was set to %', start_value; END IF; END IF; @@ -198,7 +250,7 @@ BEGIN END IF; /* Try to determine partitions count if not set */ - IF p_count IS NULL OR (relid IS NOT NULL AND p_count = 0) THEN + IF p_count IS NULL THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; @@ -237,7 +289,7 @@ BEGIN END IF; IF p_count != part_count THEN p_count := part_count; - RAISE NOTICE '"p_count" was set %', p_count; + RAISE NOTICE '"p_count" was limited to %', p_count; END IF; /* we got our partitions count */ @@ -294,6 +346,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; + part_bounds bounds%TYPE; BEGIN IF array_ndims(bounds) > 1 THEN @@ -308,11 +361,31 @@ BEGIN expression, partition_data); + /* + * Subpartitions checks, in array version of create_range_partitions + * we raise exception instead of notice + */ + IF @extschema@.has_parent_partitioned_by_expression(parent_relid, + expression, pg_typeof(bounds[1])) + THEN + part_bounds[1] := @extschema@.get_lower_bound(parent_relid, bounds[1]); + part_bounds[2] := @extschema@.get_upper_bound(parent_relid, bounds[1]); + IF part_bounds[1] != bounds[1] THEN + RAISE EXCEPTION 'Bounds should start from %', part_bounds[1]; + END IF; + END IF; + + IF part_bounds[2] IS NOT NULL AND + bounds[array_length(bounds, 1) - 1] > part_bounds[2] + THEN + RAISE EXCEPTION 'Lower bound of rightmost partition should be less than %', part_bounds[2]; + END IF; + /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, - bounds[0], - bounds[array_length(bounds, 1) - 1]); + bounds[1], + bounds[array_length(bounds, 1)]); /* Create sequence for child partitions names */ PERFORM @extschema@.create_naming_sequence(parent_relid); @@ -512,7 +585,7 @@ DECLARE upper_bound bound_value%TYPE; BEGIN upper_bound := get_upper_bound(relid, bound_value); - IF bound_value > upper_bound THEN + IF bound_value >= upper_bound THEN RAISE EXCEPTION '%', error_message; END IF; END @@ -565,7 +638,7 @@ BEGIN SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid INTO part_expr; - part_type := get_partition_type(relid); + part_type := @extschema@.get_partition_type(relid); IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( relid, part_expr, part_expr_type) THEN @@ -813,9 +886,11 @@ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( RETURNS TEXT AS $$ DECLARE part_expr TEXT; + part_expr_type REGTYPE; + part_type INTEGER; rel_persistence CHAR; v_init_callback REGPROCEDURE; - + relid REGCLASS; BEGIN PERFORM 
@extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); @@ -839,6 +914,25 @@ BEGIN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; + /* + * In case a user has used same expression on two levels, we need to check + * that we've not reached upper bound of higher partitioned table + */ + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid + INTO part_expr; + + part_type := @extschema@.get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, part_expr, part_expr_type) + THEN + PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, + start_value, '"start value" exceeds upper bound of the current level of subpartitions'); + END IF; + END IF; + /* Set inheritance */ EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); @@ -884,6 +978,7 @@ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; + inhparent REGCLASS; part_type INTEGER; BEGIN @@ -892,6 +987,14 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); + EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1') + USING partition_relid + INTO inhparent; + + if inhparent IS NOT NULL THEN + RAISE EXCEPTION 'could not detach partition if it has children'; + END IF; + /* Acquire lock on parent */ PERFORM @extschema@.prevent_data_modification(parent_relid); diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 91abd021..72e38c60 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -107,8 +107,41 @@ SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 20 select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); -/* create subpartitions but use same expression */ -SELECT create_range_partitions('subpartitions.abc_3', 'a', 150, 50, 2); +DROP TABLE subpartitions.abc CASCADE; + +/* subpartitions on same expressions */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); +SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ +SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ +SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ +\d+ subpartitions.abc_1 +SELECT * FROM pathman_partition_list; +SELECT append_range_partition('subpartitions.abc_1'::regclass); +SELECT append_range_partition('subpartitions.abc_1'::regclass); +DROP TABLE subpartitions.abc_1_10; + +/* detach_range_partition */ +SELECt detach_range_partition('subpartitions.abc_1'); + +/* attach_range_partition */ +CREATE TABLE subpartitions.abc_c(LIKE 
subpartitions.abc_1 INCLUDING ALL); +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 98, 110); /* fail */ +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 110); /* ok */ + +DROP TABLE subpartitions.abc CASCADE; + +/* subpartitions on same expression but dates */ +CREATE TABLE subpartitions.abc(a DATE NOT NULL); +INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 month'::INTERVAL); +SELECT create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, + '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; diff --git a/src/relation_info.c b/src/relation_info.c index 0b29ac18..d7d69116 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1061,7 +1061,7 @@ is_equal_to_partitioning_expression(Oid relid, char *expression, /* caller should have been check it already */ Assert(prel != NULL); - return (expr_type == value_type) && + return (getBaseType(expr_type) == value_type) && (strcmp(cexpr, prel->expr_cstr) == 0); } From 65d2f20200e71eac41a464f8f0d60b908969b0f4 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 3 Oct 2017 12:32:55 +0300 Subject: [PATCH 161/528] fixup! Add more subpartitions fixes and tests --- expected/pathman_subpartitions.out | 24 +++--------------------- sql/pathman_subpartitions.sql | 7 +++---- 2 files changed, 6 insertions(+), 25 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 3a7d4706..27be6b1e 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -380,24 +380,6 @@ SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]) 2 (1 row) -\d+ subpartitions.abc_1 - Table "subpartitions.abc_1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | not null | | plain | | -Check constraints: - "pathman_abc_1_check" CHECK (a >= 0 AND a < 100) -Inherits: subpartitions.abc -Child tables: subpartitions.abc_1_1, - subpartitions.abc_1_2, - subpartitions.abc_1_3, - subpartitions.abc_1_4, - subpartitions.abc_1_5, - subpartitions.abc_1_6, - subpartitions.abc_1_7, - subpartitions.abc_1_8, - subpartitions.abc_1_9 - SELECT * FROM pathman_partition_list; parent | partition | parttype | expr | range_min | range_max ---------------------+------------------------+----------+------+-----------+----------- @@ -464,14 +446,14 @@ DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 39 other objects /* subpartitions on same expression but dates */ CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 month'::INTERVAL); +INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); create_range_partitions ------------------------- 6 (1 row) -SELECT create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, +SELECT 
create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ WARNING: "start_value" was set to 10-02-2017 WARNING: "p_interval" is not multiple of range (10-02-2017, 11-03-2017) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 72e38c60..7f38f629 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -119,7 +119,6 @@ SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ -\d+ subpartitions.abc_1 SELECT * FROM pathman_partition_list; SELECT append_range_partition('subpartitions.abc_1'::regclass); SELECT append_range_partition('subpartitions.abc_1'::regclass); @@ -138,9 +137,9 @@ DROP TABLE subpartitions.abc CASCADE; /* subpartitions on same expression but dates */ CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 month'::INTERVAL); -SELECT create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, +INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); +SELECT create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ DROP TABLE subpartitions.abc CASCADE; From 6fb5b84b0d67cc67239acc9f5b4ea156e7be05f9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Oct 2017 12:48:05 +0300 Subject: [PATCH 162/528] bump lib version to 1.4.6 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index b05c65a4..31e669e8 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.5", + "version": "1.4.6", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.5", + "version": "1.4.6", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 66925628..b887d37b 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10405 + 10406 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index e43747e1..8addc1f4 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010405 +#define CURRENT_LIB_VERSION 0x010406 void *pathman_cache_search_relid(HTAB *cache_table, From d75175a3945891e2f3c738300f160856d1a678da Mon Sep 17 00:00:00 
2001 From: Dmitry Ivanov Date: Tue, 3 Oct 2017 14:59:21 +0300 Subject: [PATCH 163/528] more tests for pathman_rel_pathlist_hook() --- expected/pathman_upd_del.out | 127 +++++++++++++++++++++++---------- expected/pathman_upd_del_1.out | 41 +++++++++++ sql/pathman_upd_del.sql | 12 ++++ src/hooks.c | 11 +++ 4 files changed, 152 insertions(+), 39 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 147ee2e6..863418b3 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -14,6 +14,7 @@ SET enable_seqscan = OFF; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); create_range_partitions ------------------------- @@ -218,48 +219,57 @@ USING (SELECT * JOIN test.tmp2 a2 USING(id)) t WHERE t.id = r.id; - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +------------------------------------------------ Delete on tmp r -> Nested Loop - Join Filter: (a1.id = a2.id) - -> Append + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) -> Seq Scan on tmp2_1 a2 - -> Seq Scan on tmp2_2 a2_1 - -> Seq Scan on tmp2_3 a2_2 - -> Seq Scan on tmp2_4 a2_3 - -> Seq Scan on tmp2_5 a2_4 - -> Seq Scan on tmp2_6 a2_5 - -> Seq Scan on tmp2_7 a2_6 - -> Seq Scan on tmp2_8 a2_7 - -> Seq Scan on tmp2_9 a2_8 - -> Seq Scan on tmp2_10 a2_9 - -> Materialize - -> Nested Loop - -> Seq Scan on tmp r - -> Custom Scan (RuntimeAppend) - Prune by: (r.id = a1.id) - -> Seq Scan on tmp2_1 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_2 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_3 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_4 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_5 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_6 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_7 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_8 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_9 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_10 a1 - Filter: (r.id = id) -(39 rows) + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) BEGIN; DELETE FROM test.tmp r @@ -279,6 +289,45 @@ UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t WHERE t.id = r.id; ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported ROLLBACK; +/* + * UPDATE + subquery with partitioned 
table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index a1eeda7f..cce19b10 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -14,6 +14,7 @@ SET enable_seqscan = OFF; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); create_range_partitions ------------------------- @@ -264,6 +265,46 @@ UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t WHERE t.id = r.id; ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +----------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Append + -> Seq Scan on tmp2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_1 t2_1 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2_2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2_3 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2_4 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2_5 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2_6 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2_7 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2_8 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2_9 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2_10 + Filter: (id = t.id) +(27 rows) + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index bc51f815..034f942a 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -22,6 +22,7 @@ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); @@ -186,6 +187,17 @@ WHERE t.id = r.id; ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). 
+ * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r diff --git a/src/hooks.c b/src/hooks.c index e1f34b8b..884cea09 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -358,6 +358,17 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* * Check that this child is not the parent table itself. * This is exactly how standard inheritance works. + * + * Helps with queries like this one: + * + * UPDATE test.tmp t SET value = 2 + * WHERE t.id IN (SELECT id + * FROM test.tmp2 t2 + * WHERE id = t.id); + * + * Since we disable optimizations on 9.5, we + * have to skip parent table that has already + * been expanded by standard inheritance. */ if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL) { From 51f39abb49fac3d094896a0fee64edd1863ec2c5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 15:01:18 +0300 Subject: [PATCH 164/528] disable pruning optimizations for SELECT .. FOR UPDATE/SHARE/etc on PostgreSQL 9.5 --- expected/pathman_rowmarks.out | 102 +++++++++----- expected/pathman_rowmarks_1.out | 219 ++++++++++++++++++------------ sql/pathman_rowmarks.sql | 17 +++ src/compat/rowmarks_fix.c | 157 --------------------- src/hooks.c | 3 - src/include/compat/rowmarks_fix.h | 10 +- src/planner_tree_modification.c | 7 +- 7 files changed, 224 insertions(+), 291 deletions(-) diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 3e37c57f..52fd3347 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -15,6 +15,7 @@ SELECT create_hash_partitions('rowmarks.first', 'id', 5); 5 (1 row) +VACUUM ANALYZE; /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; id @@ -173,23 +174,64 @@ FOR SHARE; 6 (1 row) +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first_0 - Filter: (id = 1) -(8 rows) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -197,10 +239,10 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN ----------------------------------------------- Update on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first_0.id) - 
-> HashAggregate - Group Key: first_0.id + -> Seq Scan on second + -> Materialize -> Append -> Seq Scan on first_0 Filter: (id < 1) @@ -212,9 +254,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(18 rows) +(16 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -237,17 +277,16 @@ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first_0 - Filter: (id = 1) -(8 rows) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) SET enable_hashjoin = t; SET enable_mergejoin = t; @@ -267,17 +306,16 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Delete on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first_0 - Filter: (id = 1) -(8 rows) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second @@ -285,10 +323,10 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN ----------------------------------------------- Delete on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first_0.id) - -> HashAggregate - Group Key: first_0.id + -> Seq Scan on second + -> Materialize -> Append -> Seq Scan on first_0 Filter: (id < 1) @@ -300,9 +338,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(18 rows) +(16 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index cbc0a1c6..bd21d42f 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -15,6 +15,7 @@ SELECT create_hash_partitions('rowmarks.first', 'id', 5); 5 (1 row) +VACUUM ANALYZE; /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; id @@ -38,14 +39,15 @@ SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; --------------------------------------- LockRows -> Sort - Sort Key: first_0.id + Sort Key: first.id -> Append + -> Seq Scan on first -> Seq Scan on first_0 -> Seq Scan on first_1 -> Seq Scan on first_2 -> Seq Scan on first_3 -> Seq Scan on first_4 -(9 rows) +(10 rows) /* Simple case (execution) */ SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; @@ -90,33 +92,35 @@ WHERE id = (SELECT id FROM rowmarks.first OFFSET 10 LIMIT 1 FOR UPDATE) FOR SHARE; - QUERY PLAN ------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------- LockRows InitPlan 1 (returns $1) -> Limit -> LockRows -> Sort - Sort Key: first_0.id + Sort Key: first_5.id -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 - -> Custom Scan 
(RuntimeAppend) - Prune by: (first.id = $1) - -> Seq Scan on first_0 first + -> Seq Scan on first first_5 + -> Seq Scan on first_0 first_0_1 + -> Seq Scan on first_1 first_1_1 + -> Seq Scan on first_2 first_2_1 + -> Seq Scan on first_3 first_3_1 + -> Seq Scan on first_4 first_4_1 + -> Append + -> Seq Scan on first + Filter: (id = $1) + -> Seq Scan on first_0 Filter: (id = $1) - -> Seq Scan on first_1 first + -> Seq Scan on first_1 Filter: (id = $1) - -> Seq Scan on first_2 first + -> Seq Scan on first_2 Filter: (id = $1) - -> Seq Scan on first_3 first + -> Seq Scan on first_3 Filter: (id = $1) - -> Seq Scan on first_4 first + -> Seq Scan on first_4 Filter: (id = $1) -(24 rows) +(26 rows) /* A little harder (execution) */ SELECT * FROM rowmarks.first @@ -147,19 +151,20 @@ FOR SHARE; -> Sort Sort Key: second.id -> Seq Scan on second - -> Custom Scan (RuntimeAppend) - Prune by: (first.id = $1) - -> Seq Scan on first_0 first + -> Append + -> Seq Scan on first Filter: (id = $1) - -> Seq Scan on first_1 first + -> Seq Scan on first_0 Filter: (id = $1) - -> Seq Scan on first_2 first + -> Seq Scan on first_1 Filter: (id = $1) - -> Seq Scan on first_3 first + -> Seq Scan on first_2 Filter: (id = $1) - -> Seq Scan on first_4 first + -> Seq Scan on first_3 Filter: (id = $1) -(19 rows) + -> Seq Scan on first_4 + Filter: (id = $1) +(20 rows) /* Two tables (execution) */ SELECT * FROM rowmarks.first @@ -173,33 +178,75 @@ FOR SHARE; 6 (1 row) +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(14 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first - Filter: (id = 1) - -> Seq Scan on first_0 - Filter: (id = 1) - -> Seq Scan on first_1 - Filter: (id = 1) - -> Seq Scan on first_2 - Filter: (id = 1) - -> Seq Scan on first_3 - Filter: (id = 1) - -> Seq Scan on first_4 - Filter: (id = 1) -(18 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -207,10 +254,10 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN --------------------------------------------- Update on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first.id) - -> HashAggregate - Group Key: first.id + -> 
Seq Scan on second + -> Materialize -> Append -> Seq Scan on first Filter: (id < 1) @@ -224,9 +271,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(20 rows) +(18 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -257,27 +302,26 @@ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first - Filter: (id = 1) - -> Seq Scan on first_0 - Filter: (id = 1) - -> Seq Scan on first_1 - Filter: (id = 1) - -> Seq Scan on first_2 - Filter: (id = 1) - -> Seq Scan on first_3 - Filter: (id = 1) - -> Seq Scan on first_4 - Filter: (id = 1) -(18 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) SET enable_hashjoin = t; SET enable_mergejoin = t; @@ -297,27 +341,26 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Delete on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first - Filter: (id = 1) - -> Seq Scan on first_0 - Filter: (id = 1) - -> Seq Scan on first_1 - Filter: (id = 1) - -> Seq Scan on first_2 - Filter: (id = 1) - -> Seq Scan on first_3 - Filter: (id = 1) - -> Seq Scan on first_4 - Filter: (id = 1) -(18 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second @@ -325,10 +368,10 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN --------------------------------------------- Delete on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first.id) - -> HashAggregate - Group Key: first.id + -> Seq Scan on second + -> Materialize -> Append -> Seq Scan on first Filter: (id < 1) @@ -342,9 +385,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(20 rows) +(18 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index dac456d7..a95fbe84 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -18,6 +18,10 @@ INSERT INTO rowmarks.second SELECT generate_series(1, 10); SELECT create_hash_partitions('rowmarks.first', 'id', 5); + +VACUUM ANALYZE; + + /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; @@ -64,6 +68,19 @@ WHERE id = (SELECT id FROM rowmarks.second FOR UPDATE) FOR SHARE; +/* JOIN (plan) 
*/ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c index 66257d9d..4dd1c20a 100644 --- a/src/compat/rowmarks_fix.c +++ b/src/compat/rowmarks_fix.c @@ -51,161 +51,4 @@ append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) } -#else - - -/* Special column name for rowmarks */ -#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) -#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) - - -static void lock_rows_visitor(Plan *plan, void *context); -static List *get_tableoids_list(List *tlist); - - -/* Final rowmark processing for partitioned tables */ -void -postprocess_lock_rows(List *rtable, Plan *plan) -{ - plan_tree_walker(plan, lock_rows_visitor, rtable); -} - -/* - * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions - * - * This is necessary since preprocess_targetlist() heavily - * depends on the 'inh' flag which we have to unset. - * - * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' - * relnames into 'tableoid:rowmarkId'. - */ -void -rowmark_add_tableoids(Query *parse) -{ - ListCell *lc; - - /* Generate 'tableoid' for partitioned table rowmark */ - foreach (lc, parse->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(lc); - Oid parent = getrelid(rc->rti, parse->rtable); - Var *var; - TargetEntry *tle; - char resname[64]; - - /* Check that table is partitioned */ - if (!get_pathman_relation_info(parent)) - continue; - - var = makeVar(rc->rti, - TableOidAttributeNumber, - OIDOID, - -1, - InvalidOid, - 0); - - /* Use parent's Oid as TABLEOID_STR's key (%u) */ - snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); - - tle = makeTargetEntry((Expr *) var, - list_length(parse->targetList) + 1, - pstrdup(resname), - true); - - /* There's no problem here since new attribute is junk */ - parse->targetList = lappend(parse->targetList, tle); - } -} - -/* - * Extract target entries with resnames beginning with TABLEOID_STR - * and var->varoattno == TableOidAttributeNumber - */ -static List * -get_tableoids_list(List *tlist) -{ - List *result = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - - if (!IsA(var, Var)) - continue; - - /* Check that column name begins with TABLEOID_STR & it's tableoid */ - if (var->varoattno == TableOidAttributeNumber && - (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && - 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) - { - result = lappend(result, te); - } - } - - return result; -} - -/* - * Find 'TABLEOID_STR%u' attributes that were manually - * created for partitioned tables and replace Oids - * (used for '%u') with expected rc->rowmarkIds - */ -static void -lock_rows_visitor(Plan *plan, void *context) -{ - List *rtable = (List *) context; - LockRows *lock_rows = (LockRows *) plan; - Plan *lock_child = outerPlan(plan); - List *tableoids; - ListCell *lc; - - if (!IsA(lock_rows, LockRows)) - return; - - Assert(rtable && IsA(rtable, List) && lock_child); - - /* Select tableoid attributes that must be renamed */ - tableoids = get_tableoids_list(lock_child->targetlist); - 
if (!tableoids) - return; /* this LockRows has nothing to do with partitioned table */ - - foreach (lc, lock_rows->rowMarks) - { - PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - Oid parent_oid = getrelid(rc->rti, rtable); - ListCell *mark_lc; - List *finished_tes = NIL; /* postprocessed target entries */ - - foreach (mark_lc, tableoids) - { - TargetEntry *te = (TargetEntry *) lfirst(mark_lc); - const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); - Datum cur_oid_datum; - - cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); - - if (DatumGetObjectId(cur_oid_datum) == parent_oid) - { - char resname[64]; - - /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ - snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - te->resname = pstrdup(resname); - - finished_tes = lappend(finished_tes, te); - } - } - - /* Remove target entries that have been processed in this step */ - foreach (mark_lc, finished_tes) - tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); - - if (list_length(tableoids) == 0) - break; /* nothing to do */ - } -} - - #endif diff --git a/src/hooks.c b/src/hooks.c index 884cea09..d0da940e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -636,9 +636,6 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { - /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); - /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index 8bbd2b1d..4875358e 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -24,9 +24,6 @@ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); -#define postprocess_lock_rows(rtable, plan) ( (void) true ) -#define rowmark_add_tableoids(parse) ( (void) true ) - #else /* @@ -35,16 +32,13 @@ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); * This is absolutely crucial for UPDATE and DELETE queries, * so we had to add some special fixes for 9.5: * - * 1) provide legacy code for RowMarks (tableoids); - * 2) disable dangerous UPDATE & DELETE optimizations. + * 1) disable dangerous UPDATE & DELETE optimizations. + * 2) disable optimizations for SELECT .. FOR UPDATE etc. 
*/ #define LEGACY_ROWMARKS_95 #define append_tle_for_rowmark(root, rc) ( (void) true ) -void postprocess_lock_rows(List *rtable, Plan *plan); -void rowmark_add_tableoids(Query *parse); - #endif diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 1163197b..b601e307 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -246,7 +246,6 @@ pathman_transform_query_walker(Node *node, void *context) assign_query_id(query); /* Apply Query tree modifiers */ - rowmark_add_tableoids(query); disable_standard_inheritance(query, current_context); handle_modification_query(query, current_context); @@ -311,6 +310,12 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) { const PartRelationInfo *prel; +#ifdef LEGACY_ROWMARKS_95 + /* Don't process queries with RowMarks on 9.5 */ + if (get_parse_rowmark(parse, current_rti)) + continue; +#endif + /* Proceed if table is partitioned by pg_pathman */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { From 1b3dbb9855fcea9dedb71371ff445ad5a60c9869 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 17:03:55 +0300 Subject: [PATCH 165/528] more tests for RowMarks --- expected/pathman_rowmarks.out | 15 +++++++++++++++ expected/pathman_rowmarks_1.out | 15 +++++++++++++++ sql/pathman_rowmarks.sql | 7 +++++++ 3 files changed, 37 insertions(+) diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 52fd3347..4c399e85 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -216,6 +216,21 @@ FOR UPDATE; 10 (10 rows) +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index bd21d42f..28d3f27d 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -221,6 +221,21 @@ FOR UPDATE; 10 (10 rows) +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index a95fbe84..9864b8b9 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -81,6 +81,13 @@ JOIN rowmarks.second USING(id) ORDER BY id FOR UPDATE; +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ From 48c8526aef5f1e37ff3697d8dd53c13b0aebe38e Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 9 Oct 2017 19:25:18 +0300 Subject: [PATCH 166/528] there is no CreateStmt.partition_info in PostgresPro Enterprise since version 10 --- 
src/partition_creation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index c432b2fd..f8cf8a55 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -757,7 +757,7 @@ create_single_partition_internal(Oid parent_relid, create_stmt.partbound = NULL; create_stmt.partspec = NULL; #endif -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 +#if defined(PGPRO_EE) && PG_VERSION_NUM < 100000 create_stmt.partition_info = NULL; #endif From cd6db20d3988c801c0a3619fc3cd565324faca20 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 19:43:16 +0300 Subject: [PATCH 167/528] fix prunning in CTEs on PostgreSQL 9.5 --- expected/pathman_upd_del.out | 49 ++++++++++++++++ expected/pathman_upd_del_1.out | 99 +++++++++++++++++++++++++++------ sql/pathman_upd_del.sql | 23 ++++++++ src/planner_tree_modification.c | 49 ++++++++++++---- 4 files changed, 193 insertions(+), 27 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 863418b3..935b65b4 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -404,6 +404,55 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + -> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; DROP SCHEMA test CASCADE; NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index cce19b10..d0022855 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -229,23 +229,41 @@ USING (SELECT * JOIN test.tmp2 a2 USING(id)) t WHERE t.id = r.id; - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------------------------- Delete on tmp r - -> Merge Join - Merge Cond: (a1.id = a2.id) - -> Merge Join - Merge Cond: (r.id = a1.id) - -> Sort - Sort Key: r.id - -> Seq Scan on tmp r - -> Sort - Sort Key: a1.id - -> Seq Scan on tmp2 a1 - -> Sort - Sort Key: a2.id - -> Seq Scan on tmp2 a2 -(14 rows) + -> Nested Loop + Join Filter: (a1.id = a2.id) + -> Nested Loop + Join Filter: (r.id = a1.id) + -> Seq Scan on tmp r + -> Materialize + -> Append + -> Seq Scan on tmp2 a1 + -> Seq Scan on tmp2_1 a1_1 + -> Seq Scan on tmp2_2 a1_2 + -> Seq Scan on tmp2_3 a1_3 + -> Seq Scan on tmp2_4 a1_4 + -> Seq Scan on tmp2_5 a1_5 + -> Seq Scan on tmp2_6 a1_6 + -> Seq Scan on tmp2_7 
a1_7 + -> Seq Scan on tmp2_8 a1_8 + -> Seq Scan on tmp2_9 a1_9 + -> Seq Scan on tmp2_10 a1_10 + -> Materialize + -> Append + -> Seq Scan on tmp2 a2 + -> Seq Scan on tmp2_1 a2_1 + -> Seq Scan on tmp2_2 a2_2 + -> Seq Scan on tmp2_3 a2_3 + -> Seq Scan on tmp2_4 a2_4 + -> Seq Scan on tmp2_5 a2_5 + -> Seq Scan on tmp2_6 a2_6 + -> Seq Scan on tmp2_7 a2_7 + -> Seq Scan on tmp2_8 a2_8 + -> Seq Scan on tmp2_9 a2_9 + -> Seq Scan on tmp2_10 a2_10 +(32 rows) BEGIN; DELETE FROM test.tmp r @@ -386,6 +404,55 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + -> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; DROP SCHEMA test CASCADE; NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index 034f942a..adca1e4c 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -243,6 +243,29 @@ DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; + + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index b601e307..77540d95 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -62,19 +62,36 @@ (context)->TRANSFORM_CONTEXT_FIELD(command_type) = true; \ break; \ +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE(context, query) \ + ( (context)->parent_cte && \ + (context)->parent_cte->ctequery == (Node *) (query) ) + +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_SL(context, query) \ + ( (context)->parent_sublink && \ + (context)->parent_sublink->subselect == (Node *) (query) && \ + (context)->parent_sublink->subLinkType == CTE_SUBLINK ) + +/* Check if 'query' is CTE according to 'context' */ +#define TRANSFORM_CONTEXT_QUERY_IS_CTE(context, query) \ + ( TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE((context), (query)) || \ + TRANSFORM_CONTEXT_QUERY_IS_CTE_SL 
((context), (query)) ) + typedef struct { /* Do we have a parent CmdType query? */ - bool TRANSFORM_CONTEXT_FIELD(SELECT), - TRANSFORM_CONTEXT_FIELD(INSERT), - TRANSFORM_CONTEXT_FIELD(UPDATE), - TRANSFORM_CONTEXT_FIELD(DELETE); + bool TRANSFORM_CONTEXT_FIELD(SELECT), + TRANSFORM_CONTEXT_FIELD(INSERT), + TRANSFORM_CONTEXT_FIELD(UPDATE), + TRANSFORM_CONTEXT_FIELD(DELETE); /* Parameters for handle_modification_query() */ - ParamListInfo query_params; + ParamListInfo query_params; /* SubLink that might contain an examined query */ - SubLink *parent_sublink; + SubLink *parent_sublink; + + /* CommonTableExpr that might containt an examined query */ + CommonTableExpr *parent_cte; } transform_query_cxt; @@ -208,14 +225,24 @@ pathman_transform_query_walker(Node *node, void *context) if (node == NULL) return false; - else if (IsA(node, SubLink)) + else if (IsA(node, SubLink) || IsA(node, CommonTableExpr)) { transform_query_cxt *current_context = context, next_context; /* Initialize next context for bottom subqueries */ next_context = *current_context; - next_context.parent_sublink = (SubLink *) node; + + if (IsA(node, SubLink)) + { + next_context.parent_sublink = (SubLink *) node; + next_context.parent_cte = NULL; + } + else + { + next_context.parent_sublink = NULL; + next_context.parent_cte = (CommonTableExpr *) node; + } /* Handle expression subtree */ return expression_tree_walker(node, @@ -241,6 +268,8 @@ pathman_transform_query_walker(Node *node, void *context) default: break; } + next_context.parent_sublink = NULL; + next_context.parent_cte = NULL; /* Assign Query a 'queryId' */ assign_query_id(query); @@ -284,9 +313,7 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) /* Don't process queries under UPDATE or DELETE (except for CTEs) */ if ((TRANSFORM_CONTEXT_HAS_PARENT(context, UPDATE) || TRANSFORM_CONTEXT_HAS_PARENT(context, DELETE)) && - (context->parent_sublink && - context->parent_sublink->subselect == (Node *) parse && - context->parent_sublink->subLinkType != CTE_SUBLINK)) + !TRANSFORM_CONTEXT_QUERY_IS_CTE(context, parse)) return; #endif From bd2e71c47df5eff3ea2adb6516d7971558164761 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 20:05:45 +0300 Subject: [PATCH 168/528] add more tests for UPDATE/DELETE on PostgreSQL 9.5 (issue #77) --- Makefile | 3 +- expected/pathman_views.out | 107 ++++++++++++++++++++++++++ expected/pathman_views_1.out | 143 +++++++++++++++++++++++++++++++++++ sql/pathman_views.sql | 63 +++++++++++++++ 4 files changed, 315 insertions(+), 1 deletion(-) create mode 100644 expected/pathman_views.out create mode 100644 expected/pathman_views_1.out create mode 100644 sql/pathman_views.sql diff --git a/Makefile b/Makefile index 4a94480e..9e036208 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,8 @@ REGRESS = pathman_array_qual \ pathman_runtime_nodes \ pathman_update_trigger \ pathman_upd_del \ - pathman_utility_stmt + pathman_utility_stmt \ + pathman_views EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_views.out b/expected/pathman_views.out new file mode 100644 index 00000000..8d433b89 --- /dev/null +++ b/expected/pathman_views.out @@ -0,0 +1,107 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); 
+select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +DROP SCHEMA views CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out new file mode 100644 index 00000000..a0cdfda1 --- /dev/null +++ b/expected/pathman_views_1.out @@ -0,0 +1,143 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on 
_abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_9 + Filter: ((id = 1) OR (id = 2)) +(25 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_9 + Filter: ((id = 1) OR (id = 2)) +(25 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +DROP SCHEMA views CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql new file mode 100644 index 00000000..6fb2989f --- /dev/null +++ b/sql/pathman_views.sql @@ -0,0 +1,63 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; + + + +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); +insert into views._abc select generate_series(1, 100); + + +/* create a facade view */ +create view views.abc as select * from views._abc; + +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; + +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); + + +/* Test SELECT */ +explain (costs off) select * from views.abc; +explain (costs off) select * from views.abc where id = 1; +select count (*) from views.abc; + + +/* Test 
INSERT */ +explain (costs off) insert into views.abc values (1); +insert into views.abc values (1); + + +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; +update views.abc set id = 2 where id = 1 or id = 2; + + +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; +delete from views.abc where id = 1 or id = 2; + + + +DROP SCHEMA views CASCADE; +DROP EXTENSION pg_pathman; From 55e6dfd7f60a11d4956519b57e926e49c49b4678 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 20:20:21 +0300 Subject: [PATCH 169/528] execute SELECT FOR UPDATE on view (pathman_views) --- expected/pathman_views.out | 15 +++++++++++++++ expected/pathman_views_1.out | 35 +++++++++++++++++++++++++++++++++++ sql/pathman_views.sql | 2 ++ 3 files changed, 52 insertions(+) diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 8d433b89..2341919a 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -56,6 +56,21 @@ explain (costs off) select * from views.abc where id = 1; Filter: (id = 1) (3 rows) +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(4 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + select count (*) from views.abc; count ------- diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index a0cdfda1..fdf19f28 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -56,6 +56,41 @@ explain (costs off) select * from views.abc where id = 1; Filter: (id = 1) (3 rows) +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc + Filter: (id = 1) + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_2 + Filter: (id = 1) + -> Seq Scan on _abc_3 + Filter: (id = 1) + -> Seq Scan on _abc_4 + Filter: (id = 1) + -> Seq Scan on _abc_5 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 1) + -> Seq Scan on _abc_7 + Filter: (id = 1) + -> Seq Scan on _abc_8 + Filter: (id = 1) + -> Seq Scan on _abc_9 + Filter: (id = 1) +(24 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + select count (*) from views.abc; count ------- diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql index 6fb2989f..90118fe0 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ -40,6 +40,8 @@ execute procedure views.disable_modification(); /* Test SELECT */ explain (costs off) select * from views.abc; explain (costs off) select * from views.abc where id = 1; +explain (costs off) select * from views.abc where id = 1 for update; +select * from views.abc where id = 1 for update; select count (*) from views.abc; From fda1b36d1869dfe0ed643c1f8aef8323e9b1cf3c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Oct 2017 15:50:33 +0300 Subject: [PATCH 170/528] rename auto naming constraint if parent is renamed, take AccessShareLock on naming sequence when choosing a name for partition --- expected/pathman_utility_stmt.out | 58 +++++++++++++++- sql/pathman_utility_stmt.sql | 24 ++++++- src/hooks.c | 12 +++- src/include/init.h | 3 +- src/include/utility_stmt_hooking.h | 8 ++- src/init.c | 16 ++++- src/partition_creation.c | 31 ++++++--- src/pl_funcs.c | 2 +- src/pl_range_funcs.c | 4 +- src/utility_stmt_hooking.c | 107 
++++++++++++++++++++++------- 10 files changed, 216 insertions(+), 49 deletions(-) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index b8d8ad31..95c64f58 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -285,6 +285,58 @@ NOTICE: drop cascades to 797 other objects * Test auto check constraint renaming */ CREATE SCHEMA rename; +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT create_range_partitions('rename.parent', 'id', 1, 2, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ + regclass +------------------- + rename.parent_seq +(1 row) + +ALTER TABLE rename.parent RENAME TO parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ + regclass +----------------------- + rename.parent_renamed +(1 row) + +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ + regclass +--------------------------- + rename.parent_renamed_seq +(1 row) + +SELECT append_range_partition('rename.parent_renamed'); /* can append */ + append_range_partition +------------------------- + rename.parent_renamed_3 +(1 row) + +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +/* + * Check that partitioning constraints are renamed + */ CREATE TABLE rename.test(a serial, b int); SELECT create_hash_partitions('rename.test', 'a', 3); create_hash_partitions @@ -336,7 +388,9 @@ WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; pathman_test_inh_1_check | CHECK (a < 100) (1 row) -/* Check that plain tables are not affected too */ +/* + * Check that plain tables are not affected too + */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; SELECT add_constraint('rename.plain_test_renamed'); @@ -365,7 +419,7 @@ WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; (1 row) DROP SCHEMA rename CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 11 other objects /* * Test DROP INDEX CONCURRENTLY (test snapshots) */ diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index a0d4ae0e..62636f00 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -167,6 +167,25 @@ DROP SCHEMA copy_stmt_hooking CASCADE; */ CREATE SCHEMA rename; + +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT create_range_partitions('rename.parent', 'id', 1, 2, 2); +SELECT 'rename.parent'::regclass; /* parent is OK */ +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ +ALTER TABLE rename.parent RENAME TO parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ +SELECT append_range_partition('rename.parent_renamed'); /* can append */ +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + +/* + * Check that partitioning constraints are renamed + */ CREATE TABLE rename.test(a serial, b int); SELECT create_hash_partitions('rename.test', 'a', 3); ALTER TABLE 
rename.test_0 RENAME TO test_one; @@ -201,7 +220,9 @@ SELECT r.conname, pg_get_constraintdef(r.oid, true) FROM pg_constraint r WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; -/* Check that plain tables are not affected too */ +/* + * Check that plain tables are not affected too + */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; SELECT add_constraint('rename.plain_test_renamed'); @@ -216,6 +237,7 @@ SELECT r.conname, pg_get_constraintdef(r.oid, true) FROM pg_constraint r WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; + DROP SCHEMA rename CASCADE; diff --git a/src/hooks.c b/src/hooks.c index d0da940e..690c398a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -874,6 +874,7 @@ pathman_process_utility_hook(Node *first_arg, Oid relation_oid; PartType part_type; AttrNumber attr_number; + bool is_parent; /* Override standard COPY statement if needed */ if (is_pathman_related_copy(parsetree)) @@ -892,10 +893,15 @@ pathman_process_utility_hook(Node *first_arg, /* Override standard RENAME statement if needed */ else if (is_pathman_related_table_rename(parsetree, - &relation_oid)) + &relation_oid, + &is_parent)) { - PathmanRenameConstraint(relation_oid, - (const RenameStmt *) parsetree); + const RenameStmt *rename_stmt = (const RenameStmt *) parsetree; + + if (is_parent) + PathmanRenameSequence(relation_oid, rename_stmt); + else + PathmanRenameConstraint(relation_oid, rename_stmt); } /* Override standard ALTER COLUMN TYPE statement if needed */ diff --git a/src/include/init.h b/src/include/init.h index 8addc1f4..8aea9295 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -201,7 +201,8 @@ find_children_status find_inheritance_children_array(Oid parentrelId, char *build_check_constraint_name_relid_internal(Oid relid); char *build_check_constraint_name_relname_internal(const char *relname); -char *build_sequence_name_internal(Oid relid); +char *build_sequence_name_relid_internal(Oid relid); +char *build_sequence_name_relname_internal(const char *relname); char *build_update_trigger_name_internal(Oid relid); char *build_update_trigger_func_name_internal(Oid relid); diff --git a/src/include/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h index 6b45cde3..cc22efaf 100644 --- a/src/include/utility_stmt_hooking.h +++ b/src/include/utility_stmt_hooking.h @@ -23,7 +23,8 @@ /* Various traits */ bool is_pathman_related_copy(Node *parsetree); bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out); + Oid *relation_oid_out, + bool *is_parent_out); bool is_pathman_related_alter_column_type(Node *parsetree, Oid *parent_relid_out, AttrNumber *attr_number, @@ -32,8 +33,9 @@ bool is_pathman_related_alter_column_type(Node *parsetree, /* Statement handlers */ void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, int stmt_location, int stmt_len, uint64 *processed); -void PathmanRenameConstraint(Oid partition_relid, - const RenameStmt *partition_rename_stmt); + +void PathmanRenameConstraint(Oid partition_relid, const RenameStmt *rename_stmt); +void PathmanRenameSequence(Oid parent_relid, const RenameStmt *rename_stmt); #endif /* COPY_STMT_HOOKING_H */ diff --git a/src/init.c b/src/init.c index 3219b1fa..3729bd16 100644 --- a/src/init.c +++ b/src/init.c @@ -565,6 +565,7 @@ build_check_constraint_name_relid_internal(Oid relid) char * build_check_constraint_name_relname_internal(const char *relname) { + AssertArg(relname != NULL); return 
psprintf("pathman_%s_check", relname); } @@ -573,10 +574,21 @@ build_check_constraint_name_relname_internal(const char *relname) * NOTE: this function does not perform sanity checks at all. */ char * -build_sequence_name_internal(Oid relid) +build_sequence_name_relid_internal(Oid relid) { AssertArg(OidIsValid(relid)); - return psprintf("%s_seq", get_rel_name(relid)); + return build_sequence_name_relname_internal(get_rel_name(relid)); +} + +/* + * Generate part sequence name for a parent. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_sequence_name_relname_internal(const char *relname) +{ + AssertArg(relname != NULL); + return psprintf("%s_seq", relname); } /* diff --git a/src/partition_creation.c b/src/partition_creation.c index f8cf8a55..20094a4f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -600,22 +600,31 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ static char * choose_range_partition_name(Oid parent_relid, Oid parent_nsp) { - Datum part_num; - Oid part_seq_relid; - char *part_seq_relname; - Oid save_userid; - int save_sec_context; - bool need_priv_escalation = !superuser(); /* we might be a SU */ - char *relname; - int attempts_cnt = 1000; - - part_seq_relname = build_sequence_name_internal(parent_relid); - part_seq_relid = get_relname_relid(part_seq_relname, parent_nsp); + Datum part_num; + Oid part_seq_relid; + char *part_seq_nspname, + *part_seq_relname; + RangeVar *part_seq_rv; + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ + char *relname; + int attempts_cnt = 1000; + + /* Dispatch sequence and lock it using AccessShareLock */ + part_seq_nspname = get_namespace_name(get_rel_namespace(parent_relid)); + part_seq_relname = build_sequence_name_relid_internal(parent_relid); + part_seq_rv = makeRangeVar(part_seq_nspname, part_seq_relname, -1); + part_seq_relid = RangeVarGetRelid(part_seq_rv, AccessShareLock, true); /* Could not find part number generating sequence */ if (!OidIsValid(part_seq_relid)) elog(ERROR, "auto naming sequence \"%s\" does not exist", part_seq_relname); + pfree(part_seq_nspname); + pfree(part_seq_relname); + pfree(part_seq_rv); + /* Do we have to escalate privileges? 
*/ if (need_priv_escalation) { diff --git a/src/pl_funcs.c b/src/pl_funcs.c index bb66506d..175d36de 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -901,7 +901,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Oid naming_seq; naming_seq_rv = makeRangeVar(get_namespace_name(get_rel_namespace(relid)), - build_sequence_name_internal(relid), + build_sequence_name_relid_internal(relid), -1); naming_seq = RangeVarGetRelid(naming_seq_rv, AccessShareLock, true); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 91452ba9..1b8b2ade 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -589,16 +589,18 @@ build_sequence_name(PG_FUNCTION_ARGS) { Oid parent_relid = PG_GETARG_OID(0); Oid parent_nsp; + char *seq_name; char *result; if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); parent_nsp = get_rel_namespace(parent_relid); + seq_name = build_sequence_name_relid_internal(parent_relid); result = psprintf("%s.%s", quote_identifier(get_namespace_name(parent_nsp)), - quote_identifier(build_sequence_name_internal(parent_relid))); + quote_identifier(seq_name)); PG_RETURN_TEXT_P(cstring_to_text(result)); } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 31d39bc2..f24b9543 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -134,18 +134,19 @@ is_pathman_related_copy(Node *parsetree) */ bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out) /* ret value */ + Oid *relation_oid_out, /* ret value #1 */ + bool *is_parent_out) /* ret value #2 */ { RenameStmt *rename_stmt = (RenameStmt *) parsetree; - Oid partition_relid, + Oid relation_oid, parent_relid; - const PartRelationInfo *prel; PartParentSearch parent_search; + const PartRelationInfo *prel; Assert(IsPathmanReady()); /* Set default values */ - if (partition_relid_out) *partition_relid_out = InvalidOid; + if (relation_oid_out) *relation_oid_out = InvalidOid; if (!IsA(parsetree, RenameStmt)) return false; @@ -154,20 +155,33 @@ is_pathman_related_table_rename(Node *parsetree, if (rename_stmt->renameType != OBJECT_TABLE) return false; - /* Assume it's a partition, fetch its Oid */ - partition_relid = RangeVarGetRelid(rename_stmt->relation, - AccessShareLock, - false); + /* Fetch Oid of this relation */ + relation_oid = RangeVarGetRelid(rename_stmt->relation, + AccessShareLock, + false); + + /* Assume it's a parent */ + if (get_pathman_relation_info(relation_oid)) + { + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = true; + return true; + } - /* Try fetching parent of this table */ - parent_relid = get_parent_of_partition(partition_relid, &parent_search); + /* Assume it's a partition, fetch its parent */ + parent_relid = get_parent_of_partition(relation_oid, &parent_search); if (parent_search != PPS_ENTRY_PART_PARENT) return false; /* Is parent partitioned? */ if ((prel = get_pathman_relation_info(parent_relid)) != NULL) { - if (partition_relid_out) *partition_relid_out = partition_relid; + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = false; return true; } @@ -789,12 +803,12 @@ prepare_rri_for_copy(EState *estate, * Rename RANGE\HASH check constraint of a partition on table rename event. 
*/ void -PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ - const RenameStmt *part_rename_stmt) /* partition rename stmt */ +PathmanRenameConstraint(Oid partition_relid, /* partition Oid */ + const RenameStmt *rename_stmt) /* partition rename stmt */ { char *old_constraint_name, *new_constraint_name; - RenameStmt rename_stmt; + RenameStmt rename_con_stmt; /* Generate old constraint name */ old_constraint_name = @@ -802,16 +816,61 @@ PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ /* Generate new constraint name */ new_constraint_name = - build_check_constraint_name_relname_internal(part_rename_stmt->newname); + build_check_constraint_name_relname_internal(rename_stmt->newname); /* Build check constraint RENAME statement */ - memset((void *) &rename_stmt, 0, sizeof(RenameStmt)); - NodeSetTag(&rename_stmt, T_RenameStmt); - rename_stmt.renameType = OBJECT_TABCONSTRAINT; - rename_stmt.relation = part_rename_stmt->relation; - rename_stmt.subname = old_constraint_name; - rename_stmt.newname = new_constraint_name; - rename_stmt.missing_ok = false; - - RenameConstraint(&rename_stmt); + memset((void *) &rename_con_stmt, 0, sizeof(RenameStmt)); + NodeSetTag(&rename_con_stmt, T_RenameStmt); + rename_con_stmt.renameType = OBJECT_TABCONSTRAINT; + rename_con_stmt.relation = rename_stmt->relation; + rename_con_stmt.subname = old_constraint_name; + rename_con_stmt.newname = new_constraint_name; + rename_con_stmt.missing_ok = false; + + /* Finally, rename partitioning constraint */ + RenameConstraint(&rename_con_stmt); + + pfree(old_constraint_name); + pfree(new_constraint_name); + + /* Make changes visible */ + CommandCounterIncrement(); +} + +/* + * Rename auto naming sequence of a parent on table rename event. + */ +void +PathmanRenameSequence(Oid parent_relid, /* parent Oid */ + const RenameStmt *rename_stmt) /* parent rename stmt */ +{ + char *old_seq_name, + *new_seq_name, + *seq_nsp_name; + RangeVar *seq_rv; + Oid seq_relid; + + /* Produce old & new names and RangeVar */ + seq_nsp_name = get_namespace_name(get_rel_namespace(parent_relid)); + old_seq_name = build_sequence_name_relid_internal(parent_relid); + new_seq_name = build_sequence_name_relname_internal(rename_stmt->newname); + seq_rv = makeRangeVar(seq_nsp_name, old_seq_name, -1); + + /* Fetch Oid of sequence */ + seq_relid = RangeVarGetRelid(seq_rv, AccessExclusiveLock, true); + + /* Do nothing if there's no naming sequence */ + if (!OidIsValid(seq_relid)) + return; + + /* Finally, rename auto naming sequence */ + RenameRelationInternal(seq_relid, new_seq_name, false); + + pfree(seq_nsp_name); + pfree(old_seq_name); + pfree(new_seq_name); + pfree(seq_rv); + + /* Make changes visible */ + CommandCounterIncrement(); } From f9c842e0a9228814f69cff2b6d36f99b3d439204 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Oct 2017 15:59:33 +0300 Subject: [PATCH 171/528] make concurrent part worker more reliable, new tests --- expected/pathman_bgw.out | 77 ++++++++++++++++++++++++- sql/pathman_bgw.sql | 47 ++++++++++++++++ src/include/pathman_workers.h | 21 ++++++- src/pathman_workers.c | 103 ++++++++++++++++++---------------- 4 files changed, 197 insertions(+), 51 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 2356c1fc..f7136533 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -132,7 +132,7 @@ SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); (1 row) INSERT INTO test_bgw.test_5 VALUES (-100); -ERROR: Attempt to 
spawn new partitions of relation "test_5" failed +ERROR: attempt to spawn new partitions of relation "test_5" failed SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ parent | partition | parttype | expr | range_min | range_max -----------------+-------------------+----------+------+-----------+----------- @@ -143,5 +143,80 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par DROP FUNCTION test_bgw.abort_xact(args JSONB); DROP TABLE test_bgw.test_5 CASCADE; NOTICE: drop cascades to 3 other objects +/* + * Tests for ConcurrentPartWorker + */ +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; + id +---- + 1 +(1 row) + +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +NOTICE: worker started, you can stop it with the following command: select public.stop_concurrent_part_task('conc_part'); + partition_table_concurrently +------------------------------ + +(1 row) + +/* Wait until bgworker starts */ +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +ROLLBACK; +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; +BEGIN + LOOP + SELECT count(*) + FROM pathman_concurrent_part_tasks + WHERE processed < 500 -- protect from endless loops + INTO ops; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + ELSE + EXIT; + END IF; + END LOOP; +END +$$ LANGUAGE plpgsql; +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; + count +------- + 0 +(1 row) + +SELECT count(*) FROM ONLY test_bgw.conc_part; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_bgw.conc_part; + count +------- + 500 +(1 row) + +DROP TABLE test_bgw.conc_part CASCADE; +NOTICE: drop cascades to 5 other objects DROP SCHEMA test_bgw CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index 7eedaff2..1d8a0146 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -5,6 +5,7 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA test_bgw; + /* * Tests for SpawnPartitionsWorker */ @@ -74,5 +75,51 @@ DROP TABLE test_bgw.test_5 CASCADE; +/* + * Tests for ConcurrentPartWorker + */ + +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +/* Wait until bgworker starts */ +SELECT pg_sleep(1); +ROLLBACK; + +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; +BEGIN + LOOP + SELECT count(*) + FROM pathman_concurrent_part_tasks + WHERE processed < 500 -- protect from endless loops + INTO ops; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + ELSE + EXIT; + END IF; + END LOOP; +END +$$ LANGUAGE plpgsql; + +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; +SELECT count(*) FROM ONLY test_bgw.conc_part; +SELECT count(*) FROM test_bgw.conc_part; + +DROP TABLE test_bgw.conc_part CASCADE; + + + 
DROP SCHEMA test_bgw CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/pathman_workers.h b/src/include/pathman_workers.h index 25ab5e1d..6cf73ca5 100644 --- a/src/include/pathman_workers.h +++ b/src/include/pathman_workers.h @@ -112,10 +112,29 @@ cps_set_status(ConcurrentPartSlot *slot, ConcurrentPartSlotStatus status) SpinLockRelease(&slot->mutex); } +static inline const char * +cps_print_status(ConcurrentPartSlotStatus status) +{ + switch(status) + { + case CPS_FREE: + return "free"; + + case CPS_WORKING: + return "working"; + + case CPS_STOPPING: + return "stopping"; + + default: + return "[unknown]"; + } +} + /* Number of worker slots for concurrent partitioning */ -#define PART_WORKER_SLOTS 10 +#define PART_WORKER_SLOTS max_worker_processes /* Max number of attempts per batch */ #define PART_WORKER_MAX_ATTEMPTS 60 diff --git a/src/pathman_workers.c b/src/pathman_workers.c index e3bb7bf5..bb8f954c 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -57,7 +57,7 @@ extern PGDLLEXPORT void bgw_main_concurrent_part(Datum main_arg); static void handle_sigterm(SIGNAL_ARGS); static void bg_worker_load_config(const char *bgw_name); -static void start_bg_worker(const char bgworker_name[BGW_MAXLEN], +static bool start_bgworker(const char bgworker_name[BGW_MAXLEN], const char bgworker_proc[BGW_MAXLEN], Datum bgw_arg, bool wait_for_shutdown); @@ -90,6 +90,7 @@ static const char *concurrent_part_bgw = "ConcurrentPartWorker"; Size estimate_concurrent_part_task_slots_size(void) { + /* NOTE: we suggest that max_worker_processes is in PGC_POSTMASTER */ return sizeof(ConcurrentPartSlot) * PART_WORKER_SLOTS; } @@ -125,6 +126,7 @@ init_concurrent_part_task_slots(void) /* * Handle SIGTERM in BGW's process. + * Use it in favor of bgworker_die(). */ static void handle_sigterm(SIGNAL_ARGS) @@ -160,8 +162,8 @@ bg_worker_load_config(const char *bgw_name) /* * Common function to start background worker. 
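 * Returns true on success, or false if the worker could not be started
 * (e.g. when all max_worker_processes slots are busy); the caller decides
 * whether that is a fatal condition.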
*/
-static void
-start_bg_worker(const char bgworker_name[BGW_MAXLEN],
+static bool
+start_bgworker(const char bgworker_name[BGW_MAXLEN],
 				const char bgworker_proc[BGW_MAXLEN],
 				Datum bgw_arg, bool wait_for_shutdown)
 {
@@ -218,10 +220,9 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN],

 	switch (exec_state)
 	{
+		/* Caller might want to handle this case */
 		case BGW_COULD_NOT_START:
-			elog(ERROR, "Unable to create background %s for pg_pathman",
-				 bgworker_name);
-			break;
+			return false;

 		case BGW_PM_DIED:
 			ereport(ERROR,
@@ -232,6 +233,8 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN],
 		default:
 			break;
 	}
+
+	return true;
 }


@@ -311,10 +314,10 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type)
 #endif

 	/* Start worker and wait for it to finish */
-	start_bg_worker(spawn_partitions_bgw,
-					CppAsString(bgw_main_spawn_partitions),
-					UInt32GetDatum(segment_handle),
-					true);
+	(void) start_bgworker(spawn_partitions_bgw,
+						  CppAsString(bgw_main_spawn_partitions),
+						  UInt32GetDatum(segment_handle),
+						  true);

 	/* Save the result (partition Oid) */
 	child_oid = bgw_args->result;
@@ -324,7 +327,7 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type)

 	if (child_oid == InvalidOid)
 		ereport(ERROR,
-				(errmsg("Attempt to spawn new partitions of relation \"%s\" failed",
+				(errmsg("attempt to spawn new partitions of relation \"%s\" failed",
 						get_rel_name_or_relid(relid)),
 				 errhint("See server log for more details.")));

@@ -412,6 +415,15 @@ bgw_main_spawn_partitions(Datum main_arg)
  * -------------------------------------
  */

+/* Free bgworker's CPS slot */
+static void
+free_cps_slot(int code, Datum arg)
+{
+	ConcurrentPartSlot *part_slot =(ConcurrentPartSlot *) DatumGetPointer(arg);
+
+	cps_set_status(part_slot, CPS_FREE);
+}
+
 /*
  * Entry point for ConcurrentPartWorker's process.
  */
@@ -424,7 +436,14 @@ bgw_main_concurrent_part(Datum main_arg)
 	char			   *sql = NULL;
 	ConcurrentPartSlot *part_slot;

-	/* Establish signal handlers before unblocking signals. */
+	/* Update concurrent part slot */
+	part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)];
+	part_slot->pid = MyProcPid;
+
+	/* Establish atexit callback that will free CPS slot */
+	on_proc_exit(free_cps_slot, PointerGetDatum(part_slot));
+
+	/* Establish signal handlers before unblocking signals */
 	pqsignal(SIGTERM, handle_sigterm);

 	/* We're now ready to receive signals */
@@ -433,10 +452,6 @@ bgw_main_concurrent_part(Datum main_arg)
 	/* Create resource owner */
 	CurrentResourceOwner = ResourceOwnerCreate(NULL, concurrent_part_bgw);

-	/* Update concurrent part slot */
-	part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)];
-	part_slot->pid = MyProcPid;
-
 	/* Disable auto partition propagation */
 	SetAutoPartitionEnabled(false);

@@ -461,6 +476,8 @@ bgw_main_concurrent_part(Datum main_arg)
 		failed = false;
 		rows = 0;

+		CHECK_FOR_INTERRUPTS();
+
 		/* Start new transaction (syscache access etc.)
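		 * (note: each batch gets its own transaction, so a failed batch can
		 * be rolled back and retried without losing rows already relocated)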
*/ StartTransactionCommand(); @@ -592,12 +609,13 @@ bgw_main_concurrent_part(Datum main_arg) /* Add rows to total_rows */ SpinLockAcquire(&part_slot->mutex); part_slot->total_rows += rows; -/* Report debug message */ + SpinLockRelease(&part_slot->mutex); + #ifdef USE_ASSERT_CHECKING + /* Report debug message */ elog(DEBUG1, "%s: relocated %d rows, total: " UINT64_FORMAT " [%u]", concurrent_part_bgw, rows, part_slot->total_rows, MyProcPid); #endif - SpinLockRelease(&part_slot->mutex); } /* If other backend requested to stop us, quit */ @@ -608,9 +626,6 @@ bgw_main_concurrent_part(Datum main_arg) /* Reclaim the resources */ pfree(sql); - - /* Mark slot as FREE */ - cps_set_status(part_slot, CPS_FREE); } @@ -694,9 +709,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) if (empty_slot_idx >= 0 && empty_slot_idx != i) SpinLockRelease(&concurrent_part_slots[empty_slot_idx].mutex); - elog(ERROR, - "table \"%s\" is already being partitioned", - get_rel_name(relid)); + ereport(ERROR, (errmsg("table \"%s\" is already being partitioned", + get_rel_name(relid)))); } /* Normally we don't want to keep it */ @@ -706,7 +720,9 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Looks like we could not find an empty slot */ if (empty_slot_idx < 0) - elog(ERROR, "no empty worker slots found"); + ereport(ERROR, (ERRCODE_CONFIGURATION_LIMIT_EXCEEDED, + errmsg("no empty worker slots found"), + errhint("consider increasing max_worker_processes"))); else { /* Initialize concurrent part slot */ @@ -719,10 +735,14 @@ partition_table_concurrently(PG_FUNCTION_ARGS) } /* Start worker (we should not wait) */ - start_bg_worker(concurrent_part_bgw, - CppAsString(bgw_main_concurrent_part), - Int32GetDatum(empty_slot_idx), - false); + if (!start_bgworker(concurrent_part_bgw, + CppAsString(bgw_main_concurrent_part), + Int32GetDatum(empty_slot_idx), + false)) + { + /* Couldn't start, free CPS slot */ + cps_set_status(&concurrent_part_slots[empty_slot_idx], CPS_FREE); + } /* Tell user everything's fine */ elog(NOTICE, @@ -807,22 +827,8 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) values[Anum_pathman_cp_tasks_processed - 1] = cur_slot->total_rows; /* Now build a status string */ - switch(cur_slot->worker_status) - { - case CPS_WORKING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("working")); - break; - - case CPS_STOPPING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("stopping")); - break; - - default: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("[unknown]")); - } + values[Anum_pathman_cp_tasks_status - 1] = + CStringGetTextDatum(cps_print_status(cur_slot->worker_status)); /* Form output tuple */ htup = heap_form_tuple(funcctx->tuple_desc, values, isnull); @@ -857,26 +863,25 @@ stop_concurrent_part_task(PG_FUNCTION_ARGS) { ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; - HOLD_INTERRUPTS(); SpinLockAcquire(&cur_slot->mutex); if (cur_slot->worker_status != CPS_FREE && cur_slot->relid == relid && cur_slot->dbid == MyDatabaseId) { - elog(NOTICE, "worker will stop after it finishes current batch"); - /* Change worker's state & set 'worker_found' */ cur_slot->worker_status = CPS_STOPPING; worker_found = true; } SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); } if (worker_found) + { + elog(NOTICE, "worker will stop after it finishes current batch"); PG_RETURN_BOOL(true); + } else { elog(ERROR, "cannot find worker for relation \"%s\"", From 485f02577705bd39dbc0758cc7f38e9507a0a2ae Mon Sep 17 
00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Oct 2017 16:24:53 +0300 Subject: [PATCH 172/528] improve error messages in BGWs --- src/pathman_workers.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index bb8f954c..f2944bfb 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -237,6 +237,16 @@ start_bgworker(const char bgworker_name[BGW_MAXLEN], return true; } +/* + * Show generic error message if we failed to start bgworker. + */ +static inline void +start_bgworker_errmsg(const char *bgworker_name) +{ + ereport(ERROR, (errmsg("could not start %s", bgworker_name), + errhint("consider increasing max_worker_processes"))); +} + /* * -------------------------------------- @@ -314,10 +324,13 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) #endif /* Start worker and wait for it to finish */ - (void) start_bgworker(spawn_partitions_bgw, - CppAsString(bgw_main_spawn_partitions), - UInt32GetDatum(segment_handle), - true); + if (!start_bgworker(spawn_partitions_bgw, + CppAsString(bgw_main_spawn_partitions), + UInt32GetDatum(segment_handle), + true)) + { + start_bgworker_errmsg(spawn_partitions_bgw); + } /* Save the result (partition Oid) */ child_oid = bgw_args->result; @@ -742,6 +755,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) { /* Couldn't start, free CPS slot */ cps_set_status(&concurrent_part_slots[empty_slot_idx], CPS_FREE); + + start_bgworker_errmsg(concurrent_part_bgw); } /* Tell user everything's fine */ From 6494216767a6d3fb96c9eac63cd3fc0dedc43468 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Oct 2017 19:37:24 +0300 Subject: [PATCH 173/528] fix race conditions in tests in pathman_bgw --- expected/pathman_bgw.out | 29 ++++++++++++++++++++++++----- sql/pathman_bgw.sql | 29 ++++++++++++++++++++++++----- src/pathman_workers.c | 2 +- 3 files changed, 49 insertions(+), 11 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index f7136533..a38d2096 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -181,19 +181,38 @@ ROLLBACK; /* Wait until it finises */ DO $$ DECLARE - ops int8; + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop BEGIN LOOP - SELECT count(*) + SELECT processed FROM pathman_concurrent_part_tasks - WHERE processed < 500 -- protect from endless loops - INTO ops; + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; IF ops > 0 THEN PERFORM pg_sleep(0.2); + + ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + END IF; ELSE - EXIT; + EXIT; -- exit loop END IF; + + IF i > 50 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; END LOOP; END $$ LANGUAGE plpgsql; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index 1d8a0146..e8d7df4f 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -95,19 +95,38 @@ ROLLBACK; /* Wait until it finises */ DO $$ DECLARE - ops int8; + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop BEGIN LOOP - SELECT count(*) + SELECT processed FROM pathman_concurrent_part_tasks - WHERE processed < 500 -- protect from endless loops - INTO ops; + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; IF ops > 0 THEN PERFORM pg_sleep(0.2); + + 
ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + END IF; ELSE - EXIT; + EXIT; -- exit loop END IF; + + IF i > 50 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; END LOOP; END $$ LANGUAGE plpgsql; diff --git a/src/pathman_workers.c b/src/pathman_workers.c index f2944bfb..10bf15ad 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -432,7 +432,7 @@ bgw_main_spawn_partitions(Datum main_arg) static void free_cps_slot(int code, Datum arg) { - ConcurrentPartSlot *part_slot =(ConcurrentPartSlot *) DatumGetPointer(arg); + ConcurrentPartSlot *part_slot = (ConcurrentPartSlot *) DatumGetPointer(arg); cps_set_status(part_slot, CPS_FREE); } From a49fdacb423b9e3e9dfb45875ffb3542ca66dbc1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 15:27:23 +0300 Subject: [PATCH 174/528] bugfixes and improved error handling in ConcurrentPartWorker --- src/include/pathman_workers.h | 2 +- src/pathman_workers.c | 136 +++++++++++++++++++++------------- 2 files changed, 84 insertions(+), 54 deletions(-) diff --git a/src/include/pathman_workers.h b/src/include/pathman_workers.h index 6cf73ca5..be4d6425 100644 --- a/src/include/pathman_workers.h +++ b/src/include/pathman_workers.h @@ -74,7 +74,7 @@ typedef struct pid_t pid; /* worker's PID */ Oid dbid; /* database which contains the relation */ Oid relid; /* table to be partitioned concurrently */ - uint64 total_rows; /* total amount of rows processed */ + int64 total_rows; /* total amount of rows processed */ int32 batch_size; /* number of rows in a batch */ float8 sleep_time; /* how long should we sleep in case of error? */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 10bf15ad..ccacdace 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -443,11 +443,12 @@ free_cps_slot(int code, Datum arg) void bgw_main_concurrent_part(Datum main_arg) { - int rows; + ConcurrentPartSlot *part_slot; + char *sql = NULL; + int64 rows; bool failed; int failures_count = 0; - char *sql = NULL; - ConcurrentPartSlot *part_slot; + LOCKMODE lockmode = RowExclusiveLock; /* Update concurrent part slot */ part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; @@ -479,12 +480,14 @@ bgw_main_concurrent_part(Datum main_arg) /* Do the job */ do { - MemoryContext old_mcxt; + MemoryContext old_mcxt; Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; bool nulls[2] = { false, false }; + bool rel_locked = false; + /* Reset loop variables */ failed = false; rows = 0; @@ -520,44 +523,73 @@ bgw_main_concurrent_part(Datum main_arg) /* Exec ret = _partition_data_concurrent() */ PG_TRY(); { - /* Make sure that relation exists and has partitions */ - if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid)) && - get_pathman_relation_info(part_slot->relid) != NULL) - { - int ret; - bool isnull; + int ret; + bool isnull; - ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); - if (ret == SPI_OK_SELECT) - { - TupleDesc tupdesc = SPI_tuptable->tupdesc; - HeapTuple tuple = SPI_tuptable->vals[0]; + /* Lock relation for DELETE and INSERT */ + if (!ConditionalLockRelationOid(part_slot->relid, lockmode)) + { + elog(ERROR, "could not take lock on relation %u", part_slot->relid); + } - Assert(SPI_processed == 1); /* there should be 1 result at most */ + /* Great, now relation is locked */ + rel_locked = true; - rows = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + /* Make sure that 
relation exists */
+			if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid)))
+			{
+				/* Exit after we raise ERROR */
+				failures_count = PART_WORKER_MAX_ATTEMPTS;
-				Assert(!isnull); /* ... and ofc it must not be NULL */
-			}
+				elog(ERROR, "relation %u does not exist", part_slot->relid);
 			}
-			/* Otherwise it's time to exit */
-			else
+
+			/* Make sure that relation has partitions */
+			if (get_pathman_relation_info(part_slot->relid) == NULL)
 			{
+				/* Exit after we raise ERROR */
 				failures_count = PART_WORKER_MAX_ATTEMPTS;
-				elog(LOG, "relation \"%u\" is not partitioned (or does not exist)",
-					 part_slot->relid);
+				elog(ERROR, "relation \"%s\" is not partitioned",
+					 get_rel_name(part_slot->relid));
+			}
+
+			/* Call concurrent partitioning function */
+			ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0);
+			if (ret == SPI_OK_SELECT)
+			{
+				TupleDesc	tupdesc = SPI_tuptable->tupdesc;
+				HeapTuple	tuple = SPI_tuptable->vals[0];
+
+				/* There should be 1 result at most */
+				Assert(SPI_processed == 1);
+
+				/* Extract number of processed rows */
+				rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull));
+				Assert(!isnull); /* ... and ofc it must not be NULL */
 			}
+			/* Else raise generic error */
+			else elog(ERROR, "partitioning function returned %u", ret);
+
+			/* Finally, unlock our partitioned table */
+			UnlockRelationOid(part_slot->relid, lockmode);
 		}
 		PG_CATCH();
 		{
 			/*
 			 * The most common exception we can catch here is a deadlock with
 			 * concurrent user queries. Check that attempts count doesn't exceed
-			 * some reasonable value
+			 * some reasonable value.
 			 */
-			ErrorData  *error;
-			char	   *sleep_time_str;
+			ErrorData  *error;
+
+			/* Unlock relation if we caught ERROR too early */
+			if (rel_locked)
+				UnlockRelationOid(part_slot->relid, lockmode);
+
+			/* Increase number of failures and set 'failed' status */
+			failures_count++;
+			failed = true;

 			/* Switch to the original context & copy edata */
 			MemoryContextSwitchTo(old_mcxt);
@@ -565,21 +597,15 @@ bgw_main_concurrent_part(Datum main_arg)
 			FlushErrorState();

 			/* Print message for this BGWorker to server log */
-			sleep_time_str = datum_to_cstring(Float8GetDatum(part_slot->sleep_time),
-											  FLOAT8OID);
-			failures_count++;
 			ereport(LOG,
 					(errmsg("%s: %s", concurrent_part_bgw, error->message),
-					 errdetail("attempt: %d/%d, sleep time: %s",
+					 errdetail("attempt: %d/%d, sleep time: %.2f",
 							   failures_count,
 							   PART_WORKER_MAX_ATTEMPTS,
-							   sleep_time_str)));
-			pfree(sleep_time_str); /* free the time string */
+							   (float) part_slot->sleep_time)));
+
+			/* Finally, free error data */
 			FreeErrorData(error);
-
-			/* Set 'failed' flag */
-			failed = true;
 		}
 		PG_END_TRY();

@@ -606,9 +632,10 @@ bgw_main_concurrent_part(Datum main_arg)
 		/* Failed this time, wait */
 		else if (failed)
 		{
-			/* Abort transaction and sleep for a second */
+			/* Abort transaction */
 			AbortCurrentTransaction();
+
+			/* Sleep for a specified amount of time (default 1s) */
 			DirectFunctionCall1(pg_sleep, Float8GetDatum(part_slot->sleep_time));
 		}

@@ -626,8 +653,10 @@ bgw_main_concurrent_part(Datum main_arg)
 #ifdef USE_ASSERT_CHECKING
 		/* Report debug message */
-		elog(DEBUG1, "%s: relocated %d rows, total: " UINT64_FORMAT " [%u]",
-			 concurrent_part_bgw, rows, part_slot->total_rows, MyProcPid);
+		elog(DEBUG1, "%s: "
+					 "relocated " INT64_FORMAT " rows, "
+					 "total: " INT64_FORMAT,
+			 concurrent_part_bgw, rows, part_slot->total_rows);
 #endif
 	}

@@ -636,9 +665,6 @@ bgw_main_concurrent_part(Datum main_arg)
 			break;

 	} while(rows > 0 || failed); /* do while there's still rows to be relocated */
-
-	/* Reclaim the resources */
-	
pfree(sql); } @@ -824,26 +850,33 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) /* Iterate through worker slots */ for (i = userctx->cur_idx; i < PART_WORKER_SLOTS; i++) { - ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; + ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i], + slot_copy; HeapTuple htup = NULL; - HOLD_INTERRUPTS(); + /* Copy slot to process local memory */ SpinLockAcquire(&cur_slot->mutex); + memcpy(&slot_copy, cur_slot, sizeof(ConcurrentPartSlot)); + SpinLockRelease(&cur_slot->mutex); - if (cur_slot->worker_status != CPS_FREE) + if (slot_copy.worker_status != CPS_FREE) { Datum values[Natts_pathman_cp_tasks]; bool isnull[Natts_pathman_cp_tasks] = { 0 }; - values[Anum_pathman_cp_tasks_userid - 1] = cur_slot->userid; - values[Anum_pathman_cp_tasks_pid - 1] = cur_slot->pid; - values[Anum_pathman_cp_tasks_dbid - 1] = cur_slot->dbid; - values[Anum_pathman_cp_tasks_relid - 1] = cur_slot->relid; - values[Anum_pathman_cp_tasks_processed - 1] = cur_slot->total_rows; + values[Anum_pathman_cp_tasks_userid - 1] = slot_copy.userid; + values[Anum_pathman_cp_tasks_pid - 1] = slot_copy.pid; + values[Anum_pathman_cp_tasks_dbid - 1] = slot_copy.dbid; + values[Anum_pathman_cp_tasks_relid - 1] = slot_copy.relid; + + /* Record processed rows */ + values[Anum_pathman_cp_tasks_processed - 1] = + /* FIXME: use Int64GetDatum() in release 1.5 */ + Int32GetDatum((int32) slot_copy.total_rows); /* Now build a status string */ values[Anum_pathman_cp_tasks_status - 1] = - CStringGetTextDatum(cps_print_status(cur_slot->worker_status)); + CStringGetTextDatum(cps_print_status(slot_copy.worker_status)); /* Form output tuple */ htup = heap_form_tuple(funcctx->tuple_desc, values, isnull); @@ -852,9 +885,6 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = i + 1; } - SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); - /* Return tuple if needed */ if (htup) SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(htup)); From 6b00d812b9396353fff72d42181278c4bd19b68f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 15:44:58 +0300 Subject: [PATCH 175/528] hide false positives found by clang analyzer --- src/pathman_workers.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index ccacdace..8cd23fd7 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -534,12 +534,14 @@ bgw_main_concurrent_part(Datum main_arg) /* Great, now relation is locked */ rel_locked = true; + (void) rel_locked; /* mute clang analyzer */ /* Make sure that relation exists */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid))) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; + (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation %u does not exist", part_slot->relid); } @@ -549,6 +551,7 @@ bgw_main_concurrent_part(Datum main_arg) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; + (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation \"%s\" is not partitioned", get_rel_name(part_slot->relid)); From 546a99bb0b43158c15dfabbe960c14b5fff83059 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 16:00:54 +0300 Subject: [PATCH 176/528] return 64-bit total_rows from show_concurrent_part_tasks_internal() --- init.sql | 2 +- src/pathman_workers.c | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/init.sql b/init.sql index 5dd808ec..f54d48eb 100644 --- a/init.sql +++ b/init.sql @@ -285,7 
+285,7 @@ RETURNS TABLE ( pid INT, dbid OID, relid REGCLASS, - processed INT, + processed INT8, status TEXT) AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 8cd23fd7..b5cb0721 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -837,7 +837,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_relid, "relid", REGCLASSOID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_processed, - "processed", INT4OID, -1, 0); + "processed", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_status, "status", TEXTOID, -1, 0); @@ -874,8 +874,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) /* Record processed rows */ values[Anum_pathman_cp_tasks_processed - 1] = - /* FIXME: use Int64GetDatum() in release 1.5 */ - Int32GetDatum((int32) slot_copy.total_rows); + Int64GetDatum(slot_copy.total_rows); /* Now build a status string */ values[Anum_pathman_cp_tasks_status - 1] = From 4d7ce5db980005cd5fb4f309d0eea1ff7740b557 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 16:09:21 +0300 Subject: [PATCH 177/528] check type of 'rows' in bgw_main_concurrent_part() --- src/pathman_workers.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 8cd23fd7..ffc0f4a5 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -569,6 +569,7 @@ bgw_main_concurrent_part(Datum main_arg) /* Extract number of processed rows */ rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + Assert(tupdesc->attrs[0]->atttypid == INT8OID); /* check type */ Assert(!isnull); /* ... and ofc it must not be NULL */ } /* Else raise generic error */ From 2e0efa436a93a22476e3cb274d8435892ece1b4f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 13 Oct 2017 14:20:15 +0300 Subject: [PATCH 178/528] improve pathman_bgw tests --- expected/pathman_bgw.out | 3 ++- sql/pathman_bgw.sql | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index a38d2096..a02cfc65 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -200,7 +200,8 @@ BEGIN ASSERT rows IS NOT NULL; - IF rows_old = rows THEN + -- rows should increase! + IF rows_old <= rows THEN i = i + 1; END IF; ELSE diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index e8d7df4f..edd40c81 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -114,7 +114,8 @@ BEGIN ASSERT rows IS NOT NULL; - IF rows_old = rows THEN + -- rows should increase! 
+ IF rows_old <= rows THEN i = i + 1; END IF; ELSE From c44d6d5f85ea77b3fda7f23d253cc53f92b25405 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 13 Oct 2017 14:26:35 +0300 Subject: [PATCH 179/528] bump lib version to 1.4.7 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 31e669e8..2718f180 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.6", + "version": "1.4.7", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.6", + "version": "1.4.7", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index b887d37b..33af45fa 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10406 + 10407 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 8aea9295..73f58c8b 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010406 +#define CURRENT_LIB_VERSION 0x010407 void *pathman_cache_search_relid(HTAB *cache_table, From 0faf90ffed748638729839c7e8bd0c25f4e22419 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 17 Oct 2017 16:49:35 +0300 Subject: [PATCH 180/528] fix error code in pathman_workers.c --- src/pathman_workers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index ffc0f4a5..d6d9a953 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -763,7 +763,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Looks like we could not find an empty slot */ if (empty_slot_idx < 0) - ereport(ERROR, (ERRCODE_CONFIGURATION_LIMIT_EXCEEDED, + ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("no empty worker slots found"), errhint("consider increasing max_worker_processes"))); else From 9a7050562b992c299995c10da52bafbe0f8c6d17 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 17 Oct 2017 17:49:44 +0300 Subject: [PATCH 181/528] add more sanity checks to pathman_bgw tests --- expected/pathman_bgw.out | 8 ++++++-- sql/pathman_bgw.sql | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index a02cfc65..4166ef4e 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -187,6 +187,7 @@ DECLARE i int4 := 0; -- protect from endless loop BEGIN LOOP + -- get total number of processed rows SELECT processed FROM pathman_concurrent_part_tasks WHERE relid = 'test_bgw.conc_part'::regclass @@ -200,9 +201,12 @@ BEGIN ASSERT rows IS NOT NULL; - -- rows should increase! 
- IF rows_old <= rows THEN + IF rows_old = rows THEN i = i + 1; + ELSIF rows < rows_old THEN + RAISE EXCEPTION 'rows is decreasing: new %, old %', rows, rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; END IF; ELSE EXIT; -- exit loop diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index edd40c81..e05a829d 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -101,6 +101,7 @@ DECLARE i int4 := 0; -- protect from endless loop BEGIN LOOP + -- get total number of processed rows SELECT processed FROM pathman_concurrent_part_tasks WHERE relid = 'test_bgw.conc_part'::regclass @@ -114,9 +115,12 @@ BEGIN ASSERT rows IS NOT NULL; - -- rows should increase! - IF rows_old <= rows THEN + IF rows_old = rows THEN i = i + 1; + ELSIF rows < rows_old THEN + RAISE EXCEPTION 'rows is decreasing: new %, old %', rows, rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; END IF; ELSE EXIT; -- exit loop From cc7cc957198633859bd9fa559352d48514200f4b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 21 Oct 2017 20:41:57 +0300 Subject: [PATCH 182/528] improve compatibility with pg_dump --- expected/pathman_utility_stmt.out | 114 +++++------------------ sql/pathman_utility_stmt.sql | 28 +++--- src/hooks.c | 2 +- src/utility_stmt_hooking.c | 144 ++++++------------------------ 4 files changed, 62 insertions(+), 226 deletions(-) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 95c64f58..37149f1e 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -24,70 +24,6 @@ VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY TO */ -COPY copy_stmt_hooking.test TO stdout; -1 comment \N \N -2 comment \N \N -3 comment \N \N -4 comment \N \N -5 comment \N \N -6 comment \N \N -7 comment \N \N -8 comment \N \N -9 comment \N \N -10 comment \N \N -11 comment \N \N -12 comment \N \N -13 comment \N \N -14 comment \N \N -15 comment \N \N -16 comment \N \N -17 comment \N \N -18 comment \N \N -19 comment \N \N -20 comment \N \N -\copy copy_stmt_hooking.test to stdout (format csv) -1,comment,, -2,comment,, -3,comment,, -4,comment,, -5,comment,, -6,comment,, -7,comment,, -8,comment,, -9,comment,, -10,comment,, -11,comment,, -12,comment,, -13,comment,, -14,comment,, -15,comment,, -16,comment,, -17,comment,, -18,comment,, -19,comment,, -20,comment,, -\copy copy_stmt_hooking.test(comment) to stdout -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; @@ -113,32 +49,30 @@ VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY FROM (specified columns) */ -COPY copy_stmt_hooking.test (val) TO stdout; -1 -6 -7 -11 -16 -COPY copy_stmt_hooking.test (val, comment) TO stdout; -1 test_1 -6 test_2 -7 test_2 -11 test_3 -16 test_4 -COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; -0 1 test_1 -0 6 test_2 -0 7 test_2 -0 11 test_3 -0 16 test_4 -COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY copy_stmt_hooking.test 
(val) TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 test_4 0 0 +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +1,test_1,0,0 +6,test_2,0,0 +7,test_2,0,0 +11,test_3,0,0 +16,test_4,0,0 +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout 1 test_1 0 0 6 test_2 0 0 7 test_2 0 0 11 test_3 0 0 16 test_4 0 0 -/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +/* COPY FROM (partition does not exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; ERROR: no suitable partition for key '21' @@ -147,7 +81,7 @@ SELECT * FROM copy_stmt_hooking.test WHERE val > 20; -----+---------+----+---- (0 rows) -/* COPY TO (partition does not exist, allowed to create partitions) */ +/* COPY FROM (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; SELECT * FROM copy_stmt_hooking.test WHERE val > 20; @@ -194,8 +128,8 @@ WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; 3 (1 row) -/* COPY FROM (test transformed tuples) */ -COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; 1 0 0 6 0 0 7 0 0 @@ -203,9 +137,9 @@ COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; 16 0 0 21 0 0 26 1 2 -/* COPY TO (insert into table with dropped column) */ +/* COPY FROM (insert into table with dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; -/* COPY TO (insert into table without dropped column) */ +/* COPY FROM (insert into table without dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; /* check tuples from last partition (without dropped column) */ SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 62636f00..c7d25051 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -28,11 +28,6 @@ VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY TO */ -COPY copy_stmt_hooking.test TO stdout; -\copy copy_stmt_hooking.test to stdout (format csv) -\copy copy_stmt_hooking.test(comment) to stdout - /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; @@ -52,20 +47,21 @@ VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY FROM (specified columns) */ -COPY copy_stmt_hooking.test (val) TO stdout; -COPY copy_stmt_hooking.test (val, comment) TO stdout; -COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; -COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +COPY copy_stmt_hooking.test (val) TO stdout; /* not ok */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout -/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +/* COPY FROM (partition does not exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; 21 test_no_part 0 0 
\. SELECT * FROM copy_stmt_hooking.test WHERE val > 20; -/* COPY TO (partition does not exist, allowed to create partitions) */ +/* COPY FROM (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; 21 test_no_part 0 0 @@ -98,16 +94,16 @@ WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test'::REGCLASS; SELECT count(*) FROM pg_attribute WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; -/* COPY FROM (test transformed tuples) */ -COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; -/* COPY TO (insert into table with dropped column) */ +/* COPY FROM (insert into table with dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; 2 1 2 \. -/* COPY TO (insert into table without dropped column) */ +/* COPY FROM (insert into table without dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; 27 1 2 \. diff --git a/src/hooks.c b/src/hooks.c index 690c398a..b8fc39db 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -886,7 +886,7 @@ pathman_process_utility_hook(Node *first_arg, stmt_location, stmt_len, &processed); if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, - "PATHMAN COPY " UINT64_FORMAT, processed); + "COPY " UINT64_FORMAT, processed); return; /* don't call standard_ProcessUtility() or hooks */ } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f24b9543..103f194e 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -55,6 +55,10 @@ ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; #endif +#define PATHMAN_COPY_READ_LOCK AccessShareLock +#define PATHMAN_COPY_WRITE_LOCK RowExclusiveLock + + static uint64 PathmanCopyFrom(CopyState cstate, Relation parent_rel, List *range_table, @@ -94,8 +98,8 @@ is_pathman_related_copy(Node *parsetree) /* Get partition's Oid while locking it */ parent_relid = RangeVarGetRelid(copy_stmt->relation, (copy_stmt->is_from ? - RowExclusiveLock : - AccessShareLock), + PATHMAN_COPY_WRITE_LOCK : + PATHMAN_COPY_READ_LOCK), false); /* Check that relation is partitioned */ @@ -346,12 +350,12 @@ PathmanDoCopy(const CopyStmt *stmt, uint64 *processed) { CopyState cstate; - bool is_from = stmt->is_from; - bool pipe = (stmt->filename == NULL); + ParseState *pstate; Relation rel; - Node *query = NULL; List *range_table = NIL; - ParseState *pstate; + bool is_from = stmt->is_from, + pipe = (stmt->filename == NULL), + is_old_protocol = PG_PROTOCOL_MAJOR(FrontendProtocol) < 3 && pipe; /* Disallow COPY TO/FROM file or program except to superusers. */ if (!pipe && !superuser()) @@ -404,96 +408,22 @@ PathmanDoCopy(const CopyStmt *stmt, } ExecCheckRTPerms(range_table, true); - /* - * We should perform a query instead of low-level heap scan whenever: - * a) table has a RLS policy; - * b) table is partitioned & it's COPY FROM. 
- */ - if (check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED || - is_from == false) /* rewrite COPY table TO statements */ + /* Disable COPY FROM if table has RLS */ + if (is_from && check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED) { - SelectStmt *select; - RangeVar *from; - List *target_list = NIL; - - if (is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("COPY FROM not supported with row-level security"), errhint("Use INSERT statements instead."))); + } - /* Build target list */ - if (!stmt->attlist) - { - ColumnRef *cr; - ResTarget *target; - - cr = makeNode(ColumnRef); - cr->fields = list_make1(makeNode(A_Star)); - cr->location = -1; - - /* Build the ResTarget and add the ColumnRef to it. */ - target = makeNode(ResTarget); - target->name = NULL; - target->indirection = NIL; - target->val = (Node *) cr; - target->location = -1; - - target_list = list_make1(target); - } - else - { - ListCell *lc; - - foreach(lc, stmt->attlist) - { - ColumnRef *cr; - ResTarget *target; - - /* - * Build the ColumnRef for each column. The ColumnRef - * 'fields' property is a String 'Value' node (see - * nodes/value.h) that corresponds to the column name - * respectively. - */ - cr = makeNode(ColumnRef); - cr->fields = list_make1(lfirst(lc)); - cr->location = -1; - - /* Build the ResTarget and add the ColumnRef to it. */ - target = makeNode(ResTarget); - target->name = NULL; - target->indirection = NIL; - target->val = (Node *) cr; - target->location = -1; - - /* Add each column to the SELECT statements target list */ - target_list = lappend(target_list, target); - } - } - - /* - * Build RangeVar for from clause, fully qualified based on the - * relation which we have opened and locked. - */ - from = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)), - RelationGetRelationName(rel), -1); - - /* Build query */ - select = makeNode(SelectStmt); - select->targetList = target_list; - select->fromClause = list_make1(from); - - query = (Node *) select; - - /* - * Close the relation for now, but keep the lock on it to prevent - * changes between now and when we start the query-based COPY. - * - * We'll reopen it later as part of the query-based COPY. - */ - heap_close(rel, NoLock); - rel = NULL; + /* Disable COPY TO */ + if (!is_from) + { + ereport(WARNING, + (errmsg("COPY TO will only select rows from parent table \"%s\"", + RelationGetRelationName(rel)), + errhint("Consider using the COPY (SELECT ...) TO variant."))); } } @@ -503,19 +433,12 @@ PathmanDoCopy(const CopyStmt *stmt, pstate = make_parsestate(NULL); pstate->p_sourcetext = queryString; - /* COPY ... FROM ... */ if (is_from) { - bool is_old_protocol = PG_PROTOCOL_MAJOR(FrontendProtocol) < 3 && - stmt->filename == NULL; - - /* There should be relation */ - if (!rel) elog(FATAL, "No relation for PATHMAN COPY FROM"); - /* check read-only transaction and parallel mode */ if (XactReadOnly && !rel->rd_islocaltemp) - PreventCommandIfReadOnly("PATHMAN COPY FROM"); - PreventCommandIfParallelMode("PATHMAN COPY FROM"); + PreventCommandIfReadOnly("COPY FROM"); + PreventCommandIfParallelMode("COPY FROM"); cstate = BeginCopyFromCompat(pstate, rel, stmt->filename, stmt->is_program, NULL, stmt->attlist, @@ -523,31 +446,14 @@ PathmanDoCopy(const CopyStmt *stmt, *processed = PathmanCopyFrom(cstate, rel, range_table, is_old_protocol); EndCopyFrom(cstate); } - /* COPY ... TO ... 
*/ else { - CopyStmt modified_copy_stmt; - - /* We should've created a query */ - Assert(query); - - /* Copy 'stmt' and override some of the fields */ - modified_copy_stmt = *stmt; - modified_copy_stmt.relation = NULL; - modified_copy_stmt.query = query; - /* Call standard DoCopy using a new CopyStmt */ - DoCopyCompat(pstate, &modified_copy_stmt, stmt_location, stmt_len, - processed); + DoCopyCompat(pstate, stmt, stmt_location, stmt_len, processed); } - /* - * Close the relation. If reading, we can release the AccessShareLock we - * got; if writing, we should hold the lock until end of transaction to - * ensure that updates will be committed before lock is released. - */ - if (rel != NULL) - heap_close(rel, (is_from ? NoLock : AccessShareLock)); + /* Close the relation, but keep it locked */ + heap_close(rel, (is_from ? NoLock : PATHMAN_COPY_READ_LOCK)); } /* From 3370dc42b21875f05c5a949966d4763199c6f963 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 23 Oct 2017 16:29:32 +0300 Subject: [PATCH 183/528] add issue template --- .github/ISSUE_TEMPLATE.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..5ad2562c --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,28 @@ + + + +### Problem description + +Explain your problem here (it's always better to provide reproduction steps) ... + + + +### Environment + + + + + + + + + + + + + From 83dc5afcc96bec3d51a3f62d76a695568b8ced4d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 25 Oct 2017 19:43:49 +0300 Subject: [PATCH 184/528] fix complete cache invalidation event handling --- src/hooks.c | 23 +++- src/include/init.h | 1 + src/include/relation_info.h | 2 + src/init.c | 8 ++ src/relation_info.c | 232 ++++++++++++++++++++++++++---------- 5 files changed, 199 insertions(+), 67 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index b8fc39db..3503f857 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,6 +36,11 @@ #include "utils/lsyscache.h" +#ifdef USE_ASSERT_CHECKING +#define USE_RELCACHE_LOGGING +#endif + + /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) @@ -808,6 +813,18 @@ pathman_relcache_hook(Datum arg, Oid relid) if (!IsPathmanReady()) return; + /* Special case: flush whole relcache */ + if (relid == InvalidOid) + { + delay_invalidation_whole_cache(); + +#ifdef USE_RELCACHE_LOGGING + elog(DEBUG2, "Invalidation message for all relations [%u]", MyProcPid); +#endif + + return; + } + /* We shouldn't even consider special OIDs */ if (relid < FirstNormalObjectId) return; @@ -827,16 +844,20 @@ pathman_relcache_hook(Datum arg, Oid relid) { delay_invalidation_parent_rel(parent_relid); +#ifdef USE_RELCACHE_LOGGING elog(DEBUG2, "Invalidation message for partition %u [%u]", relid, MyProcPid); +#endif } /* We can't say, perform full invalidation procedure */ else { delay_invalidation_vague_rel(relid); - elog(DEBUG2, "Invalidation message for vague relation %u [%u]", +#ifdef USE_RELCACHE_LOGGING + elog(DEBUG2, "Invalidation message for vague rel %u [%u]", relid, MyProcPid); +#endif } } diff --git a/src/include/init.h b/src/include/init.h index 73f58c8b..763292f0 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -54,6 +54,7 @@ typedef struct #define PATHMAN_MCXT_COUNT 4 extern MemoryContext TopPathmanContext; +extern MemoryContext PathmanInvalJobsContext; extern 
MemoryContext PathmanRelationCacheContext; extern MemoryContext PathmanParentCacheContext; extern MemoryContext PathmanBoundCacheContext; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index cbc16b6e..c4bc3a05 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -275,6 +275,7 @@ const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, bool allow_incomplete); PartRelationInfo *invalidate_pathman_relation_info(Oid relid, bool *found); +void invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count); void remove_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, @@ -296,6 +297,7 @@ char *canonicalize_partitioning_expression(const Oid relid, /* Global invalidation routines */ void delay_pathman_shutdown(void); +void delay_invalidation_whole_cache(void); void delay_invalidation_parent_rel(Oid parent); void delay_invalidation_vague_rel(Oid vague_rel); void finish_delayed_invalidation(void); diff --git a/src/init.c b/src/init.c index 3729bd16..80ba4f0a 100644 --- a/src/init.c +++ b/src/init.c @@ -41,6 +41,7 @@ /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; +MemoryContext PathmanInvalJobsContext = NULL; MemoryContext PathmanRelationCacheContext = NULL; MemoryContext PathmanParentCacheContext = NULL; MemoryContext PathmanBoundCacheContext = NULL; @@ -312,6 +313,7 @@ init_local_cache(void) if (TopPathmanContext) { /* Check that child contexts exist */ + Assert(MemoryContextIsValid(PathmanInvalJobsContext)); Assert(MemoryContextIsValid(PathmanRelationCacheContext)); Assert(MemoryContextIsValid(PathmanParentCacheContext)); Assert(MemoryContextIsValid(PathmanBoundCacheContext)); @@ -322,6 +324,7 @@ init_local_cache(void) /* Initialize pg_pathman's memory contexts */ else { + Assert(PathmanInvalJobsContext == NULL); Assert(PathmanRelationCacheContext == NULL); Assert(PathmanParentCacheContext == NULL); Assert(PathmanBoundCacheContext == NULL); @@ -331,6 +334,11 @@ init_local_cache(void) CppAsString(TopPathmanContext), ALLOCSET_DEFAULT_SIZES); + PathmanInvalJobsContext = + AllocSetContextCreate(TopMemoryContext, + CppAsString(PathmanInvalJobsContext), + ALLOCSET_SMALL_SIZES); + /* For PartRelationInfo */ PathmanRelationCacheContext = AllocSetContextCreate(TopPathmanContext, diff --git a/src/relation_info.c b/src/relation_info.c index cb33c29d..e032f036 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -33,6 +33,7 @@ #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" +#include "utils/inval.h" #include "utils/memutils.h" #include "utils/ruleutils.h" #include "utils/syscache.h" @@ -53,6 +54,11 @@ #define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression \"%s\"" +#ifdef USE_ASSERT_CHECKING +#define USE_RELINFO_LOGGING +#endif + + /* Comparison function info */ typedef struct cmp_func_info { @@ -70,22 +76,26 @@ bool pg_pathman_enable_bounds_cache = true; * We delay all invalidation jobs received in relcache hook. 
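 * They are executed later by finish_delayed_invalidation(), once it is
 * safe to use the syscache again; if too many jobs accumulate, we simply
 * invalidate the whole dispatch cache instead (see INVAL_LIST_MAX_ITEMS).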
*/ static List *delayed_invalidation_parent_rels = NIL; -static List *delayed_invalidation_vague_rels = NIL; +static List *delayed_invalidation_vague_rels = NIL; +static bool delayed_invalidation_whole_cache = false; static bool delayed_shutdown = false; /* pathman was dropped */ +#define INVAL_LIST_MAX_ITEMS 10000 + /* Add unique Oid to list, allocate in TopPathmanContext */ #define list_add_unique(list, oid) \ do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo(TopPathmanContext); \ - list = list_append_unique_oid(list, ObjectIdGetDatum(oid)); \ + MemoryContext old_mcxt = MemoryContextSwitchTo(PathmanInvalJobsContext); \ + list = list_append_unique_oid(list, (oid)); \ MemoryContextSwitchTo(old_mcxt); \ } while (0) -#define free_invalidation_list(list) \ +#define free_invalidation_lists() \ do { \ - list_free(list); \ - list = NIL; \ + MemoryContextReset(PathmanInvalJobsContext); \ + delayed_invalidation_parent_rels = NIL; \ + delayed_invalidation_vague_rels = NIL; \ } while (0) /* Handy wrappers for Oids */ @@ -101,6 +111,8 @@ static Oid get_parent_of_partition_internal(Oid partition, static Expr *get_partition_constraint_expr(Oid partition); +static void free_prel_partitions(PartRelationInfo *prel); + static void fill_prel_with_partitions(PartRelationInfo *prel, const Oid *partitions, const uint32 parts_count); @@ -323,35 +335,70 @@ invalidate_pathman_relation_info(Oid relid, bool *found) relid, action, &prel_found); - /* Handle valid PartRelationInfo */ - if ((action == HASH_FIND || - (action == HASH_ENTER && prel_found)) && PrelIsValid(prel)) - { - /* Remove this parent from parents cache */ - ForgetParent(prel); + /* It's a new entry, mark it 'invalid' */ + if (prel && !prel_found) + prel->valid = false; - /* Drop cached bounds etc */ - MemoryContextDelete(prel->mcxt); + /* Clear the remaining resources */ + free_prel_partitions(prel); + + /* Set 'found' if necessary */ + if (found) *found = prel_found; + +#ifdef USE_ASSERT_CHECKING + elog(DEBUG2, + "dispatch_cache: invalidating %s record for parent %u [%u]", + (prel ? "live" : "NULL"), relid, MyProcPid); +#endif + + return prel; +} + +/* Invalidate PartRelationInfo cache entries that exist in 'parents` array */ +void +invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count) +{ + HASH_SEQ_STATUS stat; + PartRelationInfo *prel; + List *prel_bad = NIL; + ListCell *lc; + int i; + + for (i = 0; i < parents_count; i++) + { + invalidate_pathman_relation_info(parents[i], NULL); } - /* Set important default values */ - if (prel) + hash_seq_init(&stat, partitioned_rels); + + while ((prel = (PartRelationInfo *) hash_seq_search(&stat)) != NULL) { - prel->children = NULL; - prel->ranges = NULL; - prel->mcxt = NULL; + Oid parent_relid = PrelParentRelid(prel); - prel->valid = false; /* now cache entry is invalid */ + /* Does this entry exist in PATHMAN_CONFIG table? 
*/
+		if (!bsearch_oid(parent_relid, parents, parents_count))
+		{
+			/* Add entry to 'outdated' list */
+			prel_bad = lappend_oid(prel_bad, parent_relid);
+
+			/* Clear the remaining resources */
+			free_prel_partitions(prel);
+		}
+	}

-	/* Set 'found' if necessary */
-	if (found) *found = prel_found;
+	/* Remove outdated entries */
+	foreach (lc, prel_bad)
+	{
+		pathman_cache_search_relid(partitioned_rels,
+								   lfirst_oid(lc),
+								   HASH_REMOVE,
+								   NULL);
+	}

+#ifdef USE_ASSERT_CHECKING
 	elog(DEBUG2,
-		 "Invalidating record for relation %u in pg_pathman's cache [%u]",
-		 relid, MyProcPid);
-
-	return prel;
+		 "dispatch_cache: invalidated all records [%u]", MyProcPid);
+#endif
 }

 /* Get PartRelationInfo from local cache. */
@@ -387,9 +434,11 @@ get_pathman_relation_info(Oid relid)
 		}
 	}

+#ifdef USE_RELINFO_LOGGING
 	elog(DEBUG2,
-		 "Fetching %s record for relation %u from pg_pathman's cache [%u]",
+		 "dispatch_cache: fetching %s record for parent %u [%u]",
 		 (prel ? "live" : "NULL"), relid, MyProcPid);
+#endif

 	/* Make sure that 'prel' is valid */
 	Assert(!prel || PrelIsValid(prel));
@@ -423,7 +472,7 @@ get_pathman_relation_info_after_lock(Oid relid,
 	return prel;
 }

-/* Remove PartRelationInfo from local cache. */
+/* Remove PartRelationInfo from local cache */
 void
 remove_pathman_relation_info(Oid relid)
 {
@@ -434,11 +483,39 @@ remove_pathman_relation_info(Oid relid)

 	/* Now let's remove the entry completely */
 	if (found)
+	{
 		pathman_cache_search_relid(partitioned_rels, relid, HASH_REMOVE, NULL);

-	elog(DEBUG2,
-		 "Removing record for relation %u in pg_pathman's cache [%u]",
-		 relid, MyProcPid);
+#ifdef USE_RELINFO_LOGGING
+		elog(DEBUG2,
+			 "dispatch_cache: removing record for parent %u [%u]",
+			 relid, MyProcPid);
+#endif
+	}
+}
+
+static void
+free_prel_partitions(PartRelationInfo *prel)
+{
+	/* Handle valid PartRelationInfo */
+	if (PrelIsValid(prel))
+	{
+		/* Remove this parent from parents cache */
+		ForgetParent(prel);
+
+		/* Drop cached bounds etc */
+		MemoryContextDelete(prel->mcxt);
+	}
+
+	/* Set important default values */
+	if (prel)
+	{
+		prel->children = NULL;
+		prel->ranges = NULL;
+		prel->mcxt = NULL;
+
+		prel->valid = false; /* now cache entry is invalid */
+	}
 }

 /* Fill PartRelationInfo with partition-related info */
@@ -854,26 +931,55 @@ delay_pathman_shutdown(void)
 	delayed_shutdown = true;
 }

+/* Add new delayed invalidation job for whole dispatch cache */
+void
+delay_invalidation_whole_cache(void)
+{
+	/* Free useless invalidation lists */
+	free_invalidation_lists();
+
+	delayed_invalidation_whole_cache = true;
+}
+
+/* Generic wrapper for lists */
+static void
+delay_invalidation_event(List **inval_list, Oid relation)
+{
+	/* Skip if we already need to drop whole cache */
+	if (delayed_invalidation_whole_cache)
+		return;
+
+	if (list_length(*inval_list) > INVAL_LIST_MAX_ITEMS)
+	{
+		/* Too many events, drop whole cache */
+		delay_invalidation_whole_cache();
+		return;
+	}
+
+	list_add_unique(*inval_list, relation);
+}
+
 /* Add new delayed invalidation job for a [ex-]parent relation */
 void
 delay_invalidation_parent_rel(Oid parent)
 {
-	list_add_unique(delayed_invalidation_parent_rels, parent);
+	delay_invalidation_event(&delayed_invalidation_parent_rels, parent);
 }

 /* Add new delayed invalidation job for a vague relation */
 void
 delay_invalidation_vague_rel(Oid vague_rel)
 {
-	list_add_unique(delayed_invalidation_vague_rels, vague_rel);
+	delay_invalidation_event(&delayed_invalidation_vague_rels, vague_rel);
 }

 /* Finish all pending invalidation jobs if possible */
 void
 finish_delayed_invalidation(void)
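/*
 * (the jobs are processed in this order: a pending 'DROP EXTENSION'
 * shutdown, a whole-cache flush if one was requested, parents known to be
 * partitioned, and finally 'vague' relations whose role is looked up first)
 */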
-{ +{ /* Exit early if there's nothing to do */ - if (delayed_invalidation_parent_rels == NIL && + if (delayed_invalidation_whole_cache == false && + delayed_invalidation_parent_rels == NIL && delayed_invalidation_vague_rels == NIL && delayed_shutdown == false) { @@ -888,6 +994,8 @@ finish_delayed_invalidation(void) bool parents_fetched = false; ListCell *lc; + AcceptInvalidationMessages(); + /* Handle the probable 'DROP EXTENSION' case */ if (delayed_shutdown) { @@ -908,14 +1016,31 @@ finish_delayed_invalidation(void) unload_config(); /* Disregard all remaining invalidation jobs */ - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); + delayed_invalidation_whole_cache = false; + free_invalidation_lists(); /* No need to continue, exit */ return; } } + /* We might be asked to perform a complete cache invalidation */ + if (delayed_invalidation_whole_cache) + { + /* Unset 'invalidation_whole_cache' flag */ + delayed_invalidation_whole_cache = false; + + /* Fetch all partitioned tables */ + if (!parents_fetched) + { + parents = read_parent_oids(&parents_count); + parents_fetched = true; + } + + /* Invalidate live entries and remove dead ones */ + invalidate_pathman_relation_info_cache(parents, parents_count); + } + /* Process relations that are (or were) definitely partitioned */ foreach (lc, delayed_invalidation_parent_rels) { @@ -992,8 +1117,8 @@ finish_delayed_invalidation(void) } } - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); + /* Finally, free invalidation jobs lists */ + free_invalidation_lists(); if (parents) pfree(parents); @@ -1009,20 +1134,14 @@ finish_delayed_invalidation(void) void cache_parent_of_partition(Oid partition, Oid parent) { - bool found; PartParentInfo *ppar; ppar = pathman_cache_search_relid(parent_cache, partition, HASH_ENTER, - &found); - elog(DEBUG2, - found ? - "Refreshing record for child %u in pg_pathman's cache [%u]" : - "Creating new record for child %u in pg_pathman's cache [%u]", - partition, MyProcPid); + NULL); - ppar->child_rel = partition; + ppar->child_rel = partition; ppar->parent_rel = parent; } @@ -1052,30 +1171,11 @@ get_parent_of_partition_internal(Oid partition, PartParentSearch *status, HASHACTION action) { - const char *action_str; /* "Fetching"\"Resetting" */ Oid parent; PartParentInfo *ppar = pathman_cache_search_relid(parent_cache, partition, HASH_FIND, NULL); - /* Set 'action_str' */ - switch (action) - { - case HASH_REMOVE: - action_str = "Resetting"; - break; - - case HASH_FIND: - action_str = "Fetching"; - break; - - default: - elog(ERROR, "Unexpected HTAB action %u", action); - } - - elog(DEBUG2, - "%s %s record for child %u from pg_pathman's cache [%u]", - action_str, (ppar ? 
"live" : "NULL"), partition, MyProcPid); if (ppar) { From c45a95ab354f03bec617bd62a818d5eadcc06fc0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 25 Oct 2017 20:08:26 +0300 Subject: [PATCH 185/528] mute warning produced by cppcheck --- src/relation_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index e032f036..b46c62ee 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -990,7 +990,7 @@ finish_delayed_invalidation(void) if (IsTransactionState()) { Oid *parents = NULL; - int parents_count; + int parents_count = 0; bool parents_fetched = false; ListCell *lc; From 861f84f783055ae39a7f2544603b75eca19d4e38 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 31 Oct 2017 12:28:05 +0300 Subject: [PATCH 186/528] bump lib version to 1.4.8 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 2718f180..2718a8da 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.7", + "version": "1.4.8", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.7", + "version": "1.4.8", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 33af45fa..7c090761 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10407 + 10408 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 763292f0..a2f7ec77 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010407 +#define CURRENT_LIB_VERSION 0x010408 void *pathman_cache_search_relid(HTAB *cache_table, From 7e2ef6cc005e39dfe5b6867ff67fb7cad7892516 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 31 Oct 2017 12:34:40 +0300 Subject: [PATCH 187/528] fix docs --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2935ff3c..3f3a80ba 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ More interesting features are yet to come. Stay tuned! 
* `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; * `PartitionFilter`: an efficient drop-in replacement for INSERT triggers; * Automatic partition creation for new INSERTed data (only for RANGE partitioning); - * Improved `COPY FROM\TO` statement that is able to insert rows directly into partitions; + * Improved `COPY FROM` statement that is able to insert rows directly into partitions; * UPDATE triggers generation out of the box (will be replaced with custom nodes too); * User-defined callbacks for partition creation event handling; * Non-blocking concurrent table partitioning; From 2c24811f7ae5135b41278d928e4651b8cbf39927 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Nov 2017 15:43:35 +0300 Subject: [PATCH 188/528] fix warnings in pythonic tests --- tests/python/.flake8 | 2 + tests/python/.style.yapf | 2 +- tests/python/partitioning_test.py | 86 +++++++++++++------------------ 3 files changed, 38 insertions(+), 52 deletions(-) create mode 100644 tests/python/.flake8 diff --git a/tests/python/.flake8 b/tests/python/.flake8 new file mode 100644 index 00000000..7d6f9f71 --- /dev/null +++ b/tests/python/.flake8 @@ -0,0 +1,2 @@ +[flake8] +ignore = E241, E501 diff --git a/tests/python/.style.yapf b/tests/python/.style.yapf index e2ca7ba3..88f004bb 100644 --- a/tests/python/.style.yapf +++ b/tests/python/.style.yapf @@ -2,4 +2,4 @@ based_on_style = pep8 spaces_before_comment = 4 split_before_logical_operator = false -column_limit=90 +column_limit=100 diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 2d8cb858..853de564 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -13,12 +13,11 @@ import re import subprocess import threading -import json import time import unittest from distutils.version import LooseVersion -from testgres import get_new_node, get_bin_path, get_pg_config +from testgres import get_new_node, get_bin_path, get_pg_version # set setup base logging config, it can be turned on by `use_logging` # parameter on node setup @@ -54,7 +53,7 @@ } logging.config.dictConfig(LOG_CONFIG) -version = LooseVersion(get_pg_config().get("VERSION_NUM")) +version = LooseVersion(get_pg_version()) # Helper function for json equality @@ -106,23 +105,6 @@ def start_new_pathman_cluster(self, return node - def catchup_replica(self, master, replica): - """ Wait until replica synchronizes with master """ - if version >= LooseVersion('10'): - wait_lsn_query = """ - SELECT pg_current_wal_lsn() <= replay_lsn - FROM pg_stat_replication - WHERE application_name = '{0}' - """ - else: - wait_lsn_query = """ - SELECT pg_current_xlog_location() <= replay_location - FROM pg_stat_replication - WHERE application_name = '{0}' - """ - - master.poll_query_until('postgres', wait_lsn_query.format(replica.name)) - def test_concurrent(self): """ Test concurrent partitioning """ @@ -158,8 +140,7 @@ def test_replication(self): with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: with node.replicate('node2') as replica: replica.start() - # wait until replica catches up - self.catchup_replica(node, replica) + replica.catchup() # check that results are equal self.assertEqual( @@ -169,7 +150,9 @@ def test_replication(self): # enable parent and see if it is enabled in replica node.psql('postgres', "select enable_parent('abc')") - self.catchup_replica(node, replica) + # wait until replica catches up + replica.catchup() + self.assertEqual( node.psql('postgres', 'explain (costs 
off) select * from abc'), replica.psql('postgres', 'explain (costs off) select * from abc')) @@ -182,7 +165,10 @@ def test_replication(self): # check that UPDATE in pathman_config_params invalidates cache node.psql('postgres', 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) + + # wait until replica catches up + replica.catchup() + self.assertEqual( node.psql('postgres', 'explain (costs off) select * from abc'), replica.psql('postgres', 'explain (costs off) select * from abc')) @@ -688,7 +674,7 @@ def con2_thread(): explain (analyze, costs off, timing off) select * from drop_test where val = any (select generate_series(1, 40, 34)) - """) # query selects from drop_test_1 and drop_test_4 + """) # query selects from drop_test_1 and drop_test_4 con2.commit() @@ -712,15 +698,14 @@ def con2_thread(): # return all values in tuple queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) - # Step 1: cache partitioned table in con1 con1.begin() - con1.execute('select count(*) from drop_test') # load pathman's cache + con1.execute('select count(*) from drop_test') # load pathman's cache con1.commit() # Step 2: cache partitioned table in con2 con2.begin() - con2.execute('select count(*) from drop_test') # load pathman's cache + con2.execute('select count(*) from drop_test') # load pathman's cache con2.commit() # Step 3: drop first partition of 'drop_test' @@ -786,12 +771,12 @@ def con2_thread(): # Step 1: lock partitioned table in con1 con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('select count(*) from ins_test') # load pathman's cache con1.execute('lock table ins_test in share update exclusive mode') # Step 2: try inserting new value in con2 (waiting) con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache + con2.execute('select count(*) from ins_test') # load pathman's cache t = threading.Thread(target=con2_thread) t.start() @@ -853,12 +838,12 @@ def con2_thread(): # Step 1: initilize con1 con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('select count(*) from ins_test') # load pathman's cache # Step 2: initilize con2 con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) con1.execute( @@ -1031,12 +1016,12 @@ def turnon_pathman(node): get_bin_path("pg_dump"), "-p {}".format(node.port), "initial" ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via COPY + cmp_full), # dump as plain text and restore via COPY (turnoff_pathman, turnon_pathman, [ get_bin_path("pg_dump"), "-p {}".format(node.port), "--inserts", "initial" ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via INSERTs + cmp_full), # dump as plain text and restore via INSERTs (None, None, [ get_bin_path("pg_dump"), "-p {}".format(node.port), "--format=custom", "initial" @@ -1052,7 +1037,7 @@ def turnon_pathman(node): dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - if (preproc != None): + if (preproc is not None): preproc(node) # transfer and restore data @@ -1065,12 +1050,12 @@ def turnon_pathman(node): stderr=fnull) p2.communicate(input=stdoutdata) - if (postproc != None): + if 
(postproc is not None): postproc(node) # validate data with node.connect('initial') as con1, \ - node.connect('copy') as con2: + node.connect('copy') as con2: # compare plans and contents of initial and copy cmp_result = cmp_dbs(con1, con2) @@ -1092,8 +1077,8 @@ def turnon_pathman(node): config_params_initial[row[0]] = row[1:] for row in con2.execute(config_params_query): config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + self.assertEqual(config_params_initial, config_params_copy, + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) # compare constraints on each partition constraints_query = """ @@ -1106,8 +1091,8 @@ def turnon_pathman(node): constraints_initial[row[0]] = row[1:] for row in con2.execute(constraints_query): constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + self.assertEqual(constraints_initial, constraints_copy, + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) # clear copy database node.psql('copy', 'drop schema public cascade') @@ -1128,9 +1113,9 @@ def test_concurrent_detach(self): test_interval = int(math.ceil(detach_timeout * num_detachs)) insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" + + "/pgbench_scripts/insert_current_timestamp.pgbench" detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" + + "/pgbench_scripts/detachs_in_timeout.pgbench" # Check pgbench scripts on existance self.assertTrue( @@ -1202,16 +1187,14 @@ def test_update_node_plan1(self): Test scan on all partititions when using update node. 
We can't use regression tests here because 9.5 and 9.6 give different plans - ''' + ''' with get_new_node('test_update_node') as node: node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - pg_pathman.enable_partitionrouter=on + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + pg_pathman.enable_partitionrouter=on """) node.start() @@ -1275,5 +1258,6 @@ def test_update_node_plan1(self): node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + if __name__ == "__main__": unittest.main() From e5280fb5e2d80956ec81b6b98bfe550d0fa3a8e6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Nov 2017 14:16:15 +0300 Subject: [PATCH 189/528] WIP some fundamental changes to caches --- src/hooks.c | 48 +- src/include/init.h | 48 +- src/include/relation_info.h | 252 ++-- src/init.c | 99 +- src/partition_filter.c | 4 +- src/pl_funcs.c | 49 +- src/pl_range_funcs.c | 15 +- src/planner_tree_modification.c | 2 +- src/relation_info.c | 2002 +++++++++++++------------------ src/utility_stmt_hooking.c | 4 +- 10 files changed, 1078 insertions(+), 1445 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 96a7feb0..2a968683 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -812,8 +812,6 @@ pathman_shmem_startup_hook(void) void pathman_relcache_hook(Datum arg, Oid relid) { - Oid parent_relid; - /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; @@ -821,51 +819,29 @@ pathman_relcache_hook(Datum arg, Oid relid) if (!IsPathmanReady()) return; - /* Special case: flush whole relcache */ + /* Invalidation event for whole cache */ if (relid == InvalidOid) { - delay_invalidation_whole_cache(); - -#ifdef USE_RELCACHE_LOGGING - elog(DEBUG2, "Invalidation message for all relations [%u]", MyProcPid); -#endif - - return; + invalidate_pathman_status_info_cache(); } - /* We shouldn't even consider special OIDs */ - if (relid < FirstNormalObjectId) - return; - /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ if (relid == get_pathman_config_relid(false)) + { delay_pathman_shutdown(); + } - /* Invalidate PartBoundInfo cache if needed */ - forget_bounds_of_partition(relid); - - /* Invalidate PartParentInfo cache if needed */ - parent_relid = forget_parent_of_partition(relid, NULL); - - /* It *might have been a partition*, invalidate parent */ - if (OidIsValid(parent_relid)) + /* Invalidation event for some user table */ + else if (relid >= FirstNormalObjectId) { - delay_invalidation_parent_rel(parent_relid); + /* Invalidate PartBoundInfo entry if needed */ + forget_bounds_of_partition(relid); -#ifdef USE_RELCACHE_LOGGING - elog(DEBUG2, "Invalidation message for partition %u [%u]", - relid, MyProcPid); -#endif - } - /* We can't say, perform full invalidation procedure */ - else - { - delay_invalidation_vague_rel(relid); + /* Invalidate PartParentInfo entry if needed */ + forget_parent_of_partition(relid); -#ifdef USE_RELCACHE_LOGGING - elog(DEBUG2, "Invalidation message for vague rel %u [%u]", - relid, MyProcPid); -#endif + /* Invalidate PartStatusInfo entry if needed */ + invalidate_pathman_status_info(relid); } } diff --git a/src/include/init.h b/src/include/init.h index aab2e266..799e1c2d 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -46,22 +46,21 @@ typedef struct do { \ Assert(CurrentMemoryContext != TopMemoryContext); \ Assert(CurrentMemoryContext != 
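The reworked relcache hook above splits the OID space into three cases: InvalidOid (blanket invalidation), the PATHMAN_CONFIG table itself, and ordinary user relations at or above FirstNormalObjectId. For context, this is how a relcache callback of that shape is wired up; CacheRegisterRelcacheCallback() is stock PostgreSQL API, while the demo_* names are illustrative:

    #include "postgres.h"
    #include "utils/inval.h"

    static void
    demo_relcache_callback(Datum arg, Oid relid)
    {
        if (!OidIsValid(relid))
        {
            /* InvalidOid means "anything may have changed": drop all state */
            return;
        }

        /* otherwise invalidate only cache entries derived from 'relid' */
    }

    static void
    demo_register_callback(void)
    {
        /* fires in this backend on every relcache invalidation event */
        CacheRegisterRelcacheCallback(demo_relcache_callback, (Datum) 0);
    }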
TopPathmanContext); \ - Assert(CurrentMemoryContext != PathmanRelationCacheContext); \ - Assert(CurrentMemoryContext != PathmanParentCacheContext); \ - Assert(CurrentMemoryContext != PathmanBoundCacheContext); \ + Assert(CurrentMemoryContext != PathmanParentsCacheContext); \ + Assert(CurrentMemoryContext != PathmanStatusCacheContext); \ + Assert(CurrentMemoryContext != PathmanBoundsCacheContext); \ } while (0) #define PATHMAN_MCXT_COUNT 4 extern MemoryContext TopPathmanContext; -extern MemoryContext PathmanInvalJobsContext; -extern MemoryContext PathmanRelationCacheContext; -extern MemoryContext PathmanParentCacheContext; -extern MemoryContext PathmanBoundCacheContext; +extern MemoryContext PathmanParentsCacheContext; +extern MemoryContext PathmanStatusCacheContext; +extern MemoryContext PathmanBoundsCacheContext; -extern HTAB *partitioned_rels; -extern HTAB *parent_cache; -extern HTAB *bound_cache; +extern HTAB *parents_cache; +extern HTAB *status_cache; +extern HTAB *bounds_cache; /* pg_pathman's initialization state */ extern PathmanInitState pathman_init_state; @@ -70,28 +69,29 @@ extern PathmanInitState pathman_init_state; extern bool pathman_hooks_enabled; +#define PATHMAN_TOP_CONTEXT "maintenance" +#define PATHMAN_PARENTS_CACHE "partition parents cache" +#define PATHMAN_STATUS_CACHE "partition status cache" +#define PATHMAN_BOUNDS_CACHE "partition bounds cache" + + /* Transform pg_pathman's memory context into simple name */ static inline const char * -simpify_mcxt_name(MemoryContext mcxt) +simplify_mcxt_name(MemoryContext mcxt) { - static const char *top_mcxt = "maintenance", - *rel_mcxt = "partition dispatch cache", - *parent_mcxt = "partition parents cache", - *bound_mcxt = "partition bounds cache"; - if (mcxt == TopPathmanContext) - return top_mcxt; + return PATHMAN_TOP_CONTEXT; - else if (mcxt == PathmanRelationCacheContext) - return rel_mcxt; + else if (mcxt == PathmanParentsCacheContext) + return PATHMAN_PARENTS_CACHE; - else if (mcxt == PathmanParentCacheContext) - return parent_mcxt; + else if (mcxt == PathmanStatusCacheContext) + return PATHMAN_STATUS_CACHE; - else if (mcxt == PathmanBoundCacheContext) - return bound_mcxt; + else if (mcxt == PathmanBoundsCacheContext) + return PATHMAN_BOUNDS_CACHE; - else elog(ERROR, "error in function " CppAsString(simpify_mcxt_name)); + else elog(ERROR, "unknown memory context"); } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index dadc3511..70f2eedc 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -110,9 +110,7 @@ cmp_bounds(FmgrInfo *cmp_func, } -/* - * Partitioning type. - */ +/* Partitioning type */ typedef enum { PT_ANY = 0, /* for part type traits (virtual type) */ @@ -120,9 +118,7 @@ typedef enum PT_RANGE } PartType; -/* - * Child relation info for RANGE partitioning. - */ +/* Child relation info for RANGE partitioning */ typedef struct { Oid child_oid; @@ -130,16 +126,60 @@ typedef struct max; } RangeEntry; +/* + * PartStatusInfo + * Cached partitioning status of the specified relation. + * Allows us to quickly search for PartRelationInfo. + */ +typedef struct PartStatusInfo +{ + Oid relid; /* key */ + int32 refcount; /* reference counter */ + bool is_valid; /* is this entry fresh? */ + struct PartRelationInfo *prel; +} PartStatusInfo; + +/* + * PartParentInfo + * Cached parent of the specified partition. + * Allows us to quickly search for PartRelationInfo. 
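The refcount/is_valid pair in PartStatusInfo above implies an open/close discipline: an entry may be flagged stale while readers still hold it, and its memory is released only when the last holder closes it. A sketch of that lifetime rule; the demo_* names are illustrative, not the actual open_pathman_status_info()/close_pathman_status_info() implementation.

    #include "postgres.h"

    /* illustrative stand-in for the PartStatusInfo refcount protocol */
    typedef struct DemoStatusEntry
    {
        int32   refcount;   /* number of open handles */
        bool    is_valid;   /* false once an invalidation arrives */
    } DemoStatusEntry;

    static void
    demo_close_entry(DemoStatusEntry *entry)
    {
        Assert(entry->refcount > 0);
        entry->refcount--;

        /* a stale entry lingers until its last holder lets go */
        if (entry->refcount == 0 && !entry->is_valid)
            pfree(entry);
    }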
+ */ +typedef struct PartParentInfo +{ + Oid child_relid; /* key */ + Oid parent_relid; +} PartParentInfo; + +/* + * PartBoundInfo + * Cached bounds of the specified partition. + * Allows us to deminish overhead of check constraints. + */ +typedef struct PartBoundInfo +{ + Oid child_relid; /* key */ + + PartType parttype; + + /* For RANGE partitions */ + Bound range_min; + Bound range_max; + bool byval; + + /* For HASH partitions */ + uint32 part_idx; +} PartBoundInfo; + /* * PartRelationInfo * Per-relation partitioning information. * Allows us to perform partition pruning. */ -typedef struct +typedef struct PartRelationInfo { - Oid key; /* partitioned table's Oid */ - bool valid, /* is this entry valid? */ - enable_parent; /* should plan include parent? */ + PartStatusInfo *psin; /* entry holding this prel */ + + bool enable_parent; /* should plan include parent? */ PartType parttype; /* partitioning type (HASH | RANGE) */ @@ -170,55 +210,11 @@ typedef struct #define PART_EXPR_VARNO ( 1 ) -/* - * PartParentInfo - * Cached parent of the specified partition. - * Allows us to quickly search for PartRelationInfo. - */ -typedef struct -{ - Oid child_rel; /* key */ - Oid parent_rel; -} PartParentInfo; - -/* - * PartBoundInfo - * Cached bounds of the specified partition. - * Allows us to deminish overhead of check constraints. - */ -typedef struct -{ - Oid child_rel; /* key */ - - PartType parttype; - - /* For RANGE partitions */ - Bound range_min; - Bound range_max; - bool byval; - - /* For HASH partitions */ - uint32 part_idx; -} PartBoundInfo; - -/* - * PartParentSearch - * Represents status of a specific cached entry. - * Returned by [for]get_parent_of_partition(). - */ -typedef enum -{ - PPS_ENTRY_NOT_FOUND = 0, - PPS_ENTRY_PARENT, /* entry was found, but pg_pathman doesn't know it */ - PPS_ENTRY_PART_PARENT, /* entry is parent and is known by pg_pathman */ - PPS_NOT_SURE /* can't determine (not transactional state) */ -} PartParentSearch; - /* * PartRelationInfo field access macros & functions. */ -#define PrelParentRelid(prel) ( (prel)->key ) +#define PrelParentRelid(prel) ( (prel)->psin->relid ) #define PrelGetChildrenArray(prel) ( (prel)->children ) @@ -226,13 +222,9 @@ typedef enum #define PrelChildrenCount(prel) ( (prel)->children_count ) -#define PrelIsValid(prel) ( (prel) && (prel)->valid ) - static inline uint32 PrelLastChild(const PartRelationInfo *prel) { - Assert(PrelIsValid(prel)); - if (PrelChildrenCount(prel) == 0) elog(ERROR, "pg_pathman's cache entry for relation %u has 0 children", PrelParentRelid(prel)); @@ -258,13 +250,13 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) } static inline Node * -PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) +PrelExpressionForRelid(const PartRelationInfo *prel, Index rti) { /* TODO: implement some kind of cache */ Node *expr = copyObject(prel->expr); - if (rel_index != PART_EXPR_VARNO) - ChangeVarNodes(expr, PART_EXPR_VARNO, rel_index, 0); + if (rti != PART_EXPR_VARNO) + ChangeVarNodes(expr, PART_EXPR_VARNO, rti, 0); return expr; } @@ -273,54 +265,16 @@ AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length); +/* + * PartStatusInfo field access macros & functions. 
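Note that PrelExpressionForRelid() above copies the tree before calling ChangeVarNodes(): the remap mutates nodes in place, while the cached expression must stay pinned at PART_EXPR_VARNO. A sketch of that copy-then-remap pattern; the demo function is illustrative, ChangeVarNodes() is stock PostgreSQL API:

    #include "postgres.h"
    #include "nodes/nodes.h"
    #include "rewrite/rewriteManip.h"

    static Node *
    demo_expr_for_rti(Node *cached_expr, Index rti)
    {
        /* never remap the cached tree itself: it must keep varno == 1 */
        Node *expr = copyObject(cached_expr);

        if (rti != 1)   /* 1 == PART_EXPR_VARNO */
            ChangeVarNodes(expr, 1, (int) rti, 0);

        return expr;
    }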
+ */ -const PartRelationInfo *refresh_pathman_relation_info(Oid relid, - Datum *values, - bool allow_incomplete); -PartRelationInfo *invalidate_pathman_relation_info(Oid relid, bool *found); -void invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count); -void remove_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result); - -/* Partitioning expression routines */ -Node *parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, - Node **parsetree_out); - -Datum cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type); +#define PsinIsValid(psin) ( (psin)->is_valid ) -char *canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr); -bool is_equal_to_partitioning_expression(Oid relid, char *expression, - Oid value_type); +#define PsinReferenceCount(psin) ( (psin)->refcount ) -/* Global invalidation routines */ -void delay_pathman_shutdown(void); -void delay_invalidation_whole_cache(void); -void delay_invalidation_parent_rel(Oid parent); -void delay_invalidation_vague_rel(Oid vague_rel); -void finish_delayed_invalidation(void); - -/* Parent cache */ -void cache_parent_of_partition(Oid partition, Oid parent); -Oid forget_parent_of_partition(Oid partition, PartParentSearch *status); -Oid get_parent_of_partition(Oid partition, PartParentSearch *status); - -/* Bounds cache */ -void forget_bounds_of_partition(Oid partition); -PartBoundInfo *get_bounds_of_partition(Oid partition, - const PartRelationInfo *prel); -Datum get_lower_bound(Oid parent_relid, Oid value_type); -Datum get_upper_bound(Oid relid, Oid value_type); /* PartType wrappers */ - static inline void WrongPartType(PartType parttype) { @@ -341,16 +295,13 @@ DatumGetPartType(Datum datum) static inline char * PartTypeToCString(PartType parttype) { - static char *hash_str = "1", - *range_str = "2"; - switch (parttype) { case PT_HASH: - return hash_str; + return "1"; case PT_RANGE: - return range_str; + return "2"; default: WrongPartType(parttype); @@ -359,41 +310,68 @@ PartTypeToCString(PartType parttype) } -/* PartRelationInfo checker */ +/* Dispatch cache */ +void refresh_pathman_relation_info(Oid relid); +const PartRelationInfo *get_pathman_relation_info(Oid relid); +const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, + bool unlock_if_not_found, + LockAcquireResult *lock_result); + void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, const PartType expected_part_type); +/* Status cache */ +PartStatusInfo *open_pathman_status_info(Oid relid); +void close_pathman_status_info(PartStatusInfo *psin); +void invalidate_pathman_status_info(Oid relid); +void invalidate_pathman_status_info_cache(void); -/* - * Useful functions & macros for freeing memory. 
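Taken together, the prototypes above define the intended call pattern: fetch the cached entry, let shout_if_prel_is_invalid() raise ERROR on a mismatch, then rely on the Prel* accessors. A sketch of a typical caller, assuming this header is in scope:

    #include "postgres.h"
    #include "relation_info.h"

    static void
    demo_report_partition_count(Oid parent_relid)
    {
        const PartRelationInfo *prel = get_pathman_relation_info(parent_relid);

        /* ERRORs out unless 'parent_relid' is a RANGE-partitioned parent */
        shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE);

        elog(DEBUG1, "parent %u has %u partitions",
             parent_relid, PrelChildrenCount(prel));
    }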
- */ +/* Bounds cache */ +void forget_bounds_of_partition(Oid partition); +PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Datum get_lower_bound(Oid partition_relid, Oid value_type); +Datum get_upper_bound(Oid partition_relid, Oid value_type); -/* Remove all references to this parent from parents cache */ -static inline void -ForgetParent(PartRelationInfo *prel) -{ - uint32 i; +/* Parent cache */ +void cache_parent_of_partition(Oid partition, Oid parent); +void forget_parent_of_partition(Oid partition); +Oid get_parent_of_partition(Oid partition); - AssertArg(MemoryContextIsValid(prel->mcxt)); +/* Partitioning expression routines */ +Node *parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, + Node **parsetree_out); - /* Remove relevant PartParentInfos */ - if (prel->children) - { - for (i = 0; i < PrelChildrenCount(prel); i++) - { - Oid child = prel->children[i]; - - /* Skip if Oid is invalid (e.g. initialization error) */ - if (!OidIsValid(child)) - continue; - - /* If it's *always been* relid's partition, free cache */ - if (PrelParentRelid(prel) == get_parent_of_partition(child, NULL)) - forget_parent_of_partition(child, NULL); - } - } -} +Datum cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type); + +char *canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr); + +bool is_equal_to_partitioning_expression(const Oid relid, + const char *expression, + const Oid value_type); + +/* Partitioning expression routines */ +Node *parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, + Node **parsetree_out); + +Datum cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type); + +char *canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr); + + +/* Global invalidation routines */ +void delay_pathman_shutdown(void); +void finish_delayed_invalidation(void); /* For pg_pathman.enable_bounds_cache GUC */ diff --git a/src/init.c b/src/init.c index 0a9f7da6..545eb670 100644 --- a/src/init.c +++ b/src/init.c @@ -43,19 +43,18 @@ /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; -MemoryContext PathmanInvalJobsContext = NULL; -MemoryContext PathmanRelationCacheContext = NULL; -MemoryContext PathmanParentCacheContext = NULL; -MemoryContext PathmanBoundCacheContext = NULL; +MemoryContext PathmanParentsCacheContext = NULL; +MemoryContext PathmanStatusCacheContext = NULL; +MemoryContext PathmanBoundsCacheContext = NULL; /* Storage for PartRelationInfos */ -HTAB *partitioned_rels = NULL; +HTAB *parents_cache = NULL; /* Storage for PartParentInfos */ -HTAB *parent_cache = NULL; +HTAB *status_cache = NULL; /* Storage for PartBoundInfos */ -HTAB *bound_cache = NULL; +HTAB *bounds_cache = NULL; /* pg_pathman's init status */ PathmanInitState pathman_init_state; @@ -309,18 +308,17 @@ init_local_cache(void) HASHCTL ctl; /* Destroy caches, just in case */ - hash_destroy(partitioned_rels); - hash_destroy(parent_cache); - hash_destroy(bound_cache); + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); /* Reset pg_pathman's memory contexts */ if (TopPathmanContext) { /* Check that child contexts exist */ - Assert(MemoryContextIsValid(PathmanInvalJobsContext)); - Assert(MemoryContextIsValid(PathmanRelationCacheContext)); - Assert(MemoryContextIsValid(PathmanParentCacheContext)); - 
Assert(MemoryContextIsValid(PathmanBoundCacheContext)); + Assert(MemoryContextIsValid(PathmanParentsCacheContext)); + Assert(MemoryContextIsValid(PathmanStatusCacheContext)); + Assert(MemoryContextIsValid(PathmanBoundsCacheContext)); /* Clear children */ MemoryContextResetChildren(TopPathmanContext); @@ -328,66 +326,60 @@ init_local_cache(void) /* Initialize pg_pathman's memory contexts */ else { - Assert(PathmanInvalJobsContext == NULL); - Assert(PathmanRelationCacheContext == NULL); - Assert(PathmanParentCacheContext == NULL); - Assert(PathmanBoundCacheContext == NULL); + Assert(PathmanParentsCacheContext == NULL); + Assert(PathmanStatusCacheContext == NULL); + Assert(PathmanBoundsCacheContext == NULL); TopPathmanContext = AllocSetContextCreate(TopMemoryContext, - CppAsString(TopPathmanContext), + PATHMAN_TOP_CONTEXT, ALLOCSET_DEFAULT_SIZES); - PathmanInvalJobsContext = - AllocSetContextCreate(TopMemoryContext, - CppAsString(PathmanInvalJobsContext), - ALLOCSET_SMALL_SIZES); - - /* For PartRelationInfo */ - PathmanRelationCacheContext = + /* For PartParentInfo */ + PathmanParentsCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanRelationCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_PARENTS_CACHE, + ALLOCSET_SMALL_SIZES); - /* For PartParentInfo */ - PathmanParentCacheContext = + /* For PartStatusInfo */ + PathmanStatusCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanParentCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_STATUS_CACHE, + ALLOCSET_SMALL_SIZES); /* For PartBoundInfo */ - PathmanBoundCacheContext = + PathmanBoundsCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanBoundCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_BOUNDS_CACHE, + ALLOCSET_SMALL_SIZES); } memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartRelationInfo); - ctl.hcxt = PathmanRelationCacheContext; + ctl.hcxt = PathmanParentsCacheContext; - partitioned_rels = hash_create("pg_pathman's partition dispatch cache", - PART_RELS_SIZE, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + parents_cache = hash_create(PATHMAN_PARENTS_CACHE, + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartParentInfo); - ctl.hcxt = PathmanParentCacheContext; + ctl.entrysize = sizeof(PartStatusInfo); + ctl.hcxt = PathmanStatusCacheContext; - parent_cache = hash_create("pg_pathman's partition parents cache", + status_cache = hash_create(PATHMAN_STATUS_CACHE, PART_RELS_SIZE * CHILD_FACTOR, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartBoundInfo); - ctl.hcxt = PathmanBoundCacheContext; + ctl.hcxt = PathmanBoundsCacheContext; - bound_cache = hash_create("pg_pathman's partition bounds cache", - PART_RELS_SIZE * CHILD_FACTOR, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + bounds_cache = hash_create(PATHMAN_BOUNDS_CACHE, + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } /* @@ -397,13 +389,13 @@ static void fini_local_cache(void) { /* First, destroy hash tables */ - hash_destroy(partitioned_rels); - hash_destroy(parent_cache); - hash_destroy(bound_cache); + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); - partitioned_rels = NULL; - parent_cache = NULL; - bound_cache = NULL; + parents_cache = NULL; + status_cache = NULL; + bounds_cache = NULL; /* Now we can clear 
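All three hash_create() calls above follow one recipe: the key must be the first field of the entry struct, HASH_BLOBS declares it plain binary data, and HASH_CONTEXT pins every allocation to the supplied memory context. A sketch with illustrative names:

    #include "postgres.h"
    #include "utils/hsearch.h"

    typedef struct DemoEntry
    {
        Oid key;            /* hash key: must come first */
        int payload;
    } DemoEntry;

    static HTAB *
    demo_create_cache(MemoryContext mcxt)
    {
        HASHCTL ctl;

        memset(&ctl, 0, sizeof(ctl));
        ctl.keysize   = sizeof(Oid);
        ctl.entrysize = sizeof(DemoEntry);
        ctl.hcxt      = mcxt;           /* all entries live in this context */

        /* HASH_BLOBS: binary key; HASH_CONTEXT: honor ctl.hcxt */
        return hash_create("demo cache", 128, &ctl,
                           HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
    }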
allocations */ MemoryContextResetChildren(TopPathmanContext); @@ -876,9 +868,6 @@ startup_invalidate_parent(Datum *values, bool *isnull, void *context) PATHMAN_CONFIG, relid), errhint(INIT_ERROR_HINT))); } - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); } /* diff --git a/src/partition_filter.c b/src/partition_filter.c index 78123c71..33424e06 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -455,7 +455,7 @@ select_partition_for_insert(ExprState *expr_state, value, prel->ev_type); /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + refresh_pathman_relation_info(parent_relid); } else partition_relid = parts[0]; @@ -467,7 +467,7 @@ select_partition_for_insert(ExprState *expr_state, if (rri_holder == NULL) { /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + refresh_pathman_relation_info(parent_relid); /* Get a fresh PartRelationInfo */ prel = get_pathman_relation_info(parent_relid); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ac6b0dca..197c2347 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -126,26 +126,12 @@ get_number_of_partitions_pl(PG_FUNCTION_ARGS) Datum get_parent_of_partition_pl(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0); - PartParentSearch parent_search; - Oid parent; - bool emit_error = PG_GETARG_BOOL(1); + Oid partition = PG_GETARG_OID(0), + parent = get_parent_of_partition(partition); - /* Fetch parent & write down search status */ - parent = get_parent_of_partition(partition, &parent_search); - - /* We MUST be sure :) */ - Assert(parent_search != PPS_NOT_SURE); - - /* It must be parent known by pg_pathman */ - if (parent_search == PPS_ENTRY_PART_PARENT) + if (OidIsValid(parent)) PG_RETURN_OID(parent); - if (emit_error) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" is not a partition", - get_rel_name_or_relid(partition)))); - PG_RETURN_NULL(); } @@ -160,8 +146,7 @@ is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) char *expr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); Oid value_type = PG_GETARG_OID(2); - result = is_equal_to_partitioning_expression(parent_relid, expr, - value_type); + result = is_equal_to_partitioning_expression(parent_relid, expr, value_type); PG_RETURN_BOOL(result); } @@ -171,10 +156,10 @@ is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) Datum get_lower_bound_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid partition_relid = PG_GETARG_OID(0); Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - PG_RETURN_POINTER(get_lower_bound(relid, value_type)); + PG_RETURN_POINTER(get_lower_bound(partition_relid, value_type)); } /* @@ -183,10 +168,10 @@ get_lower_bound_pl(PG_FUNCTION_ARGS) Datum get_upper_bound_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid partition_relid = PG_GETARG_OID(0); Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - PG_RETURN_POINTER(get_upper_bound(relid, value_type)); + PG_RETURN_POINTER(get_upper_bound(partition_relid, value_type)); } /* @@ -269,14 +254,14 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) usercxt = (show_cache_stats_cxt *) palloc(sizeof(show_cache_stats_cxt)); usercxt->pathman_contexts[0] = TopPathmanContext; - usercxt->pathman_contexts[1] = PathmanRelationCacheContext; - usercxt->pathman_contexts[2] = PathmanParentCacheContext; - usercxt->pathman_contexts[3] = PathmanBoundCacheContext; + usercxt->pathman_contexts[1] 
= PathmanParentsCacheContext; + usercxt->pathman_contexts[2] = PathmanStatusCacheContext; + usercxt->pathman_contexts[3] = PathmanBoundsCacheContext; usercxt->pathman_htables[0] = NULL; /* no HTAB for this entry */ - usercxt->pathman_htables[1] = partitioned_rels; - usercxt->pathman_htables[2] = parent_cache; - usercxt->pathman_htables[3] = bound_cache; + usercxt->pathman_htables[1] = parents_cache; + usercxt->pathman_htables[2] = status_cache; + usercxt->pathman_htables[3] = bounds_cache; usercxt->current_item = 0; @@ -318,7 +303,7 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) current_htab = usercxt->pathman_htables[usercxt->current_item]; values[Anum_pathman_cs_context - 1] = - CStringGetTextDatum(simpify_mcxt_name(current_mcxt)); + CStringGetTextDatum(simplify_mcxt_name(current_mcxt)); /* We can't check stats of mcxt prior to 9.6 */ #if PG_VERSION_NUM >= 90600 @@ -864,9 +849,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Some flags might change during refresh attempt */ save_pathman_init_state(&init_state); - refresh_pathman_relation_info(relid, - values, - false); /* initialize immediately */ + get_pathman_relation_info(relid); } PG_CATCH(); { diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 37b8fcb9..93a78241 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -403,7 +403,6 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) { Oid partition_relid, parent_relid; - PartParentSearch parent_search; RangeEntry *ranges; const PartRelationInfo *prel; uint32 i; @@ -415,8 +414,8 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'partition_relid' should not be NULL"))); - parent_relid = get_parent_of_partition(partition_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%s\" is not a partition", get_rel_name_or_relid(partition_relid)))); @@ -615,7 +614,6 @@ Datum merge_range_partitions(PG_FUNCTION_ARGS) { Oid parent = InvalidOid; - PartParentSearch parent_search; ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); Oid *partitions; @@ -658,10 +656,10 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Check if all partitions are from the same parent */ for (i = 0; i < nparts; i++) { - Oid cur_parent = get_parent_of_partition(partitions[i], &parent_search); + Oid cur_parent = get_parent_of_partition(partitions[i]); /* If we couldn't find a parent, it's not a partition */ - if (parent_search != PPS_ENTRY_PART_PARENT) + if (!OidIsValid(cur_parent)) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("relation \"%s\" is not a partition", get_rel_name_or_relid(partitions[i])))); @@ -783,15 +781,14 @@ Datum drop_range_partition_expand_next(PG_FUNCTION_ARGS) { const PartRelationInfo *prel; - PartParentSearch parent_search; Oid relid = PG_GETARG_OID(0), parent; RangeEntry *ranges; int i; /* Get parent's relid */ - parent = get_parent_of_partition(relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent = get_parent_of_partition(relid); + if (!OidIsValid(parent)) elog(ERROR, "relation \"%s\" is not a partition", get_rel_name_or_relid(relid)); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index e8bcc129..9e6d64e1 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -584,7 +584,7 @@ partition_router_visitor(Plan *plan, void *context) const PartRelationInfo 
*prel; /* Find topmost parent */ - while ((tmp_relid = get_parent_of_partition(relid, NULL)) != InvalidOid) + while (OidIsValid(tmp_relid = get_parent_of_partition(relid))) relid = tmp_relid; /* Check that table is partitioned */ diff --git a/src/relation_info.c b/src/relation_info.c index e327bc57..77a81fc0 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -75,44 +75,19 @@ bool pg_pathman_enable_bounds_cache = true; /* * We delay all invalidation jobs received in relcache hook. */ -static List *delayed_invalidation_parent_rels = NIL; -static List *delayed_invalidation_vague_rels = NIL; -static bool delayed_invalidation_whole_cache = false; static bool delayed_shutdown = false; /* pathman was dropped */ -#define INVAL_LIST_MAX_ITEMS 10000 - -/* Add unique Oid to list, allocate in TopPathmanContext */ -#define list_add_unique(list, oid) \ - do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo(PathmanInvalJobsContext); \ - list = list_append_unique_oid(list, (oid)); \ - MemoryContextSwitchTo(old_mcxt); \ - } while (0) - -#define free_invalidation_lists() \ - do { \ - MemoryContextReset(PathmanInvalJobsContext); \ - delayed_invalidation_parent_rels = NIL; \ - delayed_invalidation_vague_rels = NIL; \ - } while (0) - /* Handy wrappers for Oids */ #define bsearch_oid(key, array, array_size) \ bsearch((const void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) -static bool try_invalidate_parent(Oid relid, Oid *parents, int parents_count); -static Oid try_catalog_parent_search(Oid partition, PartParentSearch *status); -static Oid get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action); +static PartRelationInfo *build_pathman_relation_info(Oid relid, Datum *values); +static void free_pathman_relation_info(PartRelationInfo *prel); static Expr *get_partition_constraint_expr(Oid partition); -static void free_prel_partitions(PartRelationInfo *prel); - static void fill_prel_with_partitions(PartRelationInfo *prel, const Oid *partitions, const uint32 parts_count); @@ -141,381 +116,205 @@ init_relation_info_static_data(void) NULL); } + /* - * refresh\invalidate\get\remove PartRelationInfo functions. + * Partition dispatch routines. 
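The bsearch_oid() macro above only works if the array is sorted with the ordering the comparator defines, so callers must sort the parents list first. A standalone sketch of that contract with a plain comparator; the demo_* names are illustrative:

    #include "postgres.h"
    #include <stdlib.h>

    static int
    demo_oid_cmp(const void *a, const void *b)
    {
        Oid lhs = *(const Oid *) a;
        Oid rhs = *(const Oid *) b;

        return (lhs > rhs) - (lhs < rhs);
    }

    static bool
    demo_oid_is_member(Oid relid, Oid *array, size_t nmembers)
    {
        /* precondition: bsearch() needs the ordering it searches with */
        qsort(array, nmembers, sizeof(Oid), demo_oid_cmp);

        return bsearch(&relid, array, nmembers,
                       sizeof(Oid), demo_oid_cmp) != NULL;
    }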
*/ -const PartRelationInfo * -refresh_pathman_relation_info(Oid relid, - Datum *values, - bool allow_incomplete) +/* TODO: comment */ +void +refresh_pathman_relation_info(Oid relid) { - const LOCKMODE lockmode = AccessShareLock; - const TypeCacheEntry *typcache; - Oid *prel_children; - uint32 prel_children_count = 0, - i; - PartRelationInfo *prel; - Datum param_values[Natts_pathman_config_params]; - bool param_isnull[Natts_pathman_config_params]; - char *expr; - MemoryContext old_mcxt; - - AssertTemporaryContext(); - prel = invalidate_pathman_relation_info(relid, NULL); - Assert(prel); - - /* Try locking parent, exit fast if 'allow_incomplete' */ - if (allow_incomplete) - { - if (!ConditionalLockRelationOid(relid, lockmode)) - return NULL; /* leave an invalid entry */ - } - else LockRelationOid(relid, lockmode); - /* Check if parent exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - { - /* Nope, it doesn't, remove this entry and exit */ - UnlockRelationOid(relid, lockmode); - remove_pathman_relation_info(relid); - return NULL; /* exit */ - } +} - /* Make both arrays point to NULL */ - prel->children = NULL; - prel->ranges = NULL; +/* Get PartRelationInfo from local cache */ +const PartRelationInfo * +get_pathman_relation_info(Oid relid) +{ + PartStatusInfo *psin = open_pathman_status_info(relid); + PartRelationInfo *prel = psin ? psin->prel : NULL; - /* Set partitioning type */ - prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, + "fetching %s record for parent %u [%u]", + (prel ? "live" : "NULL"), relid, MyProcPid); +#endif - /* Fetch cooked partitioning expression */ - expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); + return prel; +} - /* Create a new memory context to store expression tree etc */ - prel->mcxt = AllocSetContextCreate(PathmanRelationCacheContext, - CppAsString(refresh_pathman_relation_info), - ALLOCSET_SMALL_SIZES); +/* Acquire lock on a table and try to get PartRelationInfo */ +const PartRelationInfo * +get_pathman_relation_info_after_lock(Oid relid, + bool unlock_if_not_found, + LockAcquireResult *lock_result) +{ + const PartRelationInfo *prel; + LockAcquireResult acquire_result; - /* Switch to persistent memory context */ - old_mcxt = MemoryContextSwitchTo(prel->mcxt); + /* Restrict concurrent partition creation (it's dangerous) */ + acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - /* Build partitioning expression tree */ - prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); - prel->expr = (Node *) stringToNode(expr); - fix_opfuncids(prel->expr); + /* Invalidate cache entry (see AcceptInvalidationMessages()) */ + refresh_pathman_relation_info(relid); - /* Extract Vars and varattnos of partitioning expression */ - prel->expr_vars = NIL; - prel->expr_atts = NULL; - prel->expr_vars = pull_var_clause_compat(prel->expr, 0, 0); - pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); + /* Set 'lock_result' if asked to */ + if (lock_result) + *lock_result = acquire_result; - MemoryContextSwitchTo(old_mcxt); + prel = get_pathman_relation_info(relid); + if (!prel && unlock_if_not_found) + UnlockRelationOid(relid, ShareUpdateExclusiveLock); - /* First, fetch type of partitioning expression */ - prel->ev_type = exprType(prel->expr); - prel->ev_typmod = exprTypmod(prel->expr); - prel->ev_collid = exprCollation(prel->expr); + return prel; +} - /* Fetch HASH & CMP fuctions and other stuff from 
type cache */ - typcache = lookup_type_cache(prel->ev_type, - TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); +/* Build a new PartRelationInfo for relation (might emit ERROR) */ +static PartRelationInfo * +build_pathman_relation_info(Oid relid, Datum *values) +{ + const LOCKMODE lockmode = AccessShareLock; + MemoryContext prel_mcxt; + PartRelationInfo *prel; - prel->ev_byval = typcache->typbyval; - prel->ev_len = typcache->typlen; - prel->ev_align = typcache->typalign; + AssertTemporaryContext(); - prel->cmp_proc = typcache->cmp_proc; - prel->hash_proc = typcache->hash_proc; + /* Lock parent table */ + LockRelationOid(relid, lockmode); - /* Try searching for children (don't wait if we can't lock) */ - switch (find_inheritance_children_array(relid, lockmode, - allow_incomplete, - &prel_children_count, - &prel_children)) + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) { - /* If there's no children at all, remove this entry */ - case FCS_NO_CHILDREN: - elog(DEBUG2, "refresh: relation %u has no children [%u]", - relid, MyProcPid); - - UnlockRelationOid(relid, lockmode); - remove_pathman_relation_info(relid); - return NULL; /* exit */ - - /* If can't lock children, leave an invalid entry */ - case FCS_COULD_NOT_LOCK: - elog(DEBUG2, "refresh: cannot lock children of relation %u [%u]", - relid, MyProcPid); - - UnlockRelationOid(relid, lockmode); - return NULL; /* exit */ - - /* Found some children, just unlock parent */ - case FCS_FOUND: - elog(DEBUG2, "refresh: found children of relation %u [%u]", - relid, MyProcPid); + /* Nope, it doesn't, remove this entry and exit */ + UnlockRelationOid(relid, lockmode); + return NULL; /* exit */ + } - UnlockRelationOid(relid, lockmode); - break; /* continue */ + /* Create a new memory context to store expression tree etc */ + prel_mcxt = AllocSetContextCreate(PathmanParentsCacheContext, + __FUNCTION__, + ALLOCSET_SMALL_SIZES); - /* Error: unknown result code */ - default: - elog(ERROR, "error in function " - CppAsString(find_inheritance_children_array)); - } + /* Create a new PartRelationInfo */ + prel = MemoryContextAlloc(prel_mcxt, sizeof(PartRelationInfo)); + prel->mcxt = prel_mcxt; - /* - * Fill 'prel' with partition info, raise ERROR if anything is wrong. - * This way PartRelationInfo will remain 'invalid', and 'get' procedure - * will try to refresh it again (and again), until the error is fixed - * by user manually (i.e. invalid check constraints etc). 
- */ + /* Memory leak protection */ PG_TRY(); { - fill_prel_with_partitions(prel, prel_children, prel_children_count); - } - PG_CATCH(); - { - /* Remove this parent from parents cache */ - ForgetParent(prel); - - /* Delete unused 'prel_mcxt' */ - MemoryContextDelete(prel->mcxt); - + MemoryContext old_mcxt; + const TypeCacheEntry *typcache; + char *expr; + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + Oid *prel_children; + uint32 prel_children_count = 0, + i; + + /* Make both arrays point to NULL */ prel->children = NULL; prel->ranges = NULL; - prel->mcxt = NULL; - - /* Rethrow ERROR further */ - PG_RE_THROW(); - } - PG_END_TRY(); - /* Peform some actions for each child */ - for (i = 0; i < prel_children_count; i++) - { - /* Add "partition+parent" pair to cache */ - cache_parent_of_partition(prel_children[i], relid); - - /* Now it's time to unlock this child */ - UnlockRelationOid(prel_children[i], lockmode); - } - - if (prel_children) - pfree(prel_children); - - /* Read additional parameters ('enable_parent' at the moment) */ - if (read_pathman_params(relid, param_values, param_isnull)) - { - prel->enable_parent = param_values[Anum_pathman_config_params_enable_parent - 1]; - } - /* Else set default values if they cannot be found */ - else - { - prel->enable_parent = DEFAULT_PATHMAN_ENABLE_PARENT; - } + /* Set partitioning type */ + prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - /* We've successfully built a cache entry */ - prel->valid = true; - - return prel; -} + /* Fetch cooked partitioning expression */ + expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); -/* Invalidate PartRelationInfo cache entry. Create new entry if 'found' is NULL. */ -PartRelationInfo * -invalidate_pathman_relation_info(Oid relid, bool *found) -{ - bool prel_found; - HASHACTION action = found ? HASH_FIND : HASH_ENTER; - PartRelationInfo *prel; + /* Switch to persistent memory context */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); - prel = pathman_cache_search_relid(partitioned_rels, - relid, action, - &prel_found); + /* Build partitioning expression tree */ + prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + prel->expr = (Node *) stringToNode(expr); + fix_opfuncids(prel->expr); - /* It's a new entry, mark it 'invalid' */ - if (prel && !prel_found) - prel->valid = false; + /* Extract Vars and varattnos of partitioning expression */ + prel->expr_vars = NIL; + prel->expr_atts = NULL; + prel->expr_vars = pull_var_clause_compat(prel->expr, 0, 0); + pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); - /* Clear the remaining resources */ - free_prel_partitions(prel); + MemoryContextSwitchTo(old_mcxt); - /* Set 'found' if necessary */ - if (found) *found = prel_found; + /* First, fetch type of partitioning expression */ + prel->ev_type = exprType(prel->expr); + prel->ev_typmod = exprTypmod(prel->expr); + prel->ev_collid = exprCollation(prel->expr); -#ifdef USE_ASSERT_CHECKING - elog(DEBUG2, - "dispatch_cache: invalidating %s record for parent %u [%u]", - (prel ? 
"live" : "NULL"), relid, MyProcPid); -#endif + /* Fetch HASH & CMP fuctions and other stuff from type cache */ + typcache = lookup_type_cache(prel->ev_type, + TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); - return prel; -} + prel->ev_byval = typcache->typbyval; + prel->ev_len = typcache->typlen; + prel->ev_align = typcache->typalign; -/* Invalidate PartRelationInfo cache entries that exist in 'parents` array */ -void -invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count) -{ - HASH_SEQ_STATUS stat; - PartRelationInfo *prel; - List *prel_bad = NIL; - ListCell *lc; - int i; + prel->cmp_proc = typcache->cmp_proc; + prel->hash_proc = typcache->hash_proc; - for (i = 0; i < parents_count; i++) - { - invalidate_pathman_relation_info(parents[i], NULL); - } + /* Try searching for children */ + (void) find_inheritance_children_array(relid, lockmode, false, + &prel_children_count, + &prel_children); - hash_seq_init(&stat, partitioned_rels); + /* Fill 'prel' with partition info, raise ERROR if anything is wrong */ + fill_prel_with_partitions(prel, prel_children, prel_children_count); - while ((prel = (PartRelationInfo *) hash_seq_search(&stat)) != NULL) - { - Oid parent_relid = PrelParentRelid(prel); + /* Unlock the parent */ + UnlockRelationOid(relid, lockmode); - /* Does this entry exist in PATHMAN_CONFIG table? */ - if (!bsearch_oid(parent_relid, parents, parents_count)) + /* Now it's time to take care of children */ + for (i = 0; i < prel_children_count; i++) { - /* All entry to 'outdated' list */ - prel_bad = lappend_oid(prel_bad, parent_relid); + /* Cache this child */ + cache_parent_of_partition(prel_children[i], relid); - /* Clear the remaining resources */ - free_prel_partitions(prel); + /* Unlock this child */ + UnlockRelationOid(prel_children[i], lockmode); } - } - - /* Remove outdated entries */ - foreach (lc, prel_bad) - { - pathman_cache_search_relid(partitioned_rels, - lfirst_oid(lc), - HASH_REMOVE, - NULL); - } - -#ifdef USE_ASSERT_CHECKING - elog(DEBUG2, - "dispatch_cache: invalidated all records [%u]", MyProcPid); -#endif -} -/* Get PartRelationInfo from local cache. */ -const PartRelationInfo * -get_pathman_relation_info(Oid relid) -{ - const PartRelationInfo *prel = pathman_cache_search_relid(partitioned_rels, - relid, HASH_FIND, - NULL); - /* Refresh PartRelationInfo if needed */ - if (prel && !PrelIsValid(prel)) - { - ItemPointerData iptr; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + if (prel_children) + pfree(prel_children); - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) + /* Read additional parameters ('enable_parent' at the moment) */ + if (read_pathman_params(relid, param_values, param_isnull)) { - bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - if (upd_expr) - pathman_config_refresh_parsed_expression(relid, values, isnull, &iptr); - - /* Refresh partitioned table cache entry (might turn NULL) */ - prel = refresh_pathman_relation_info(relid, values, false); + prel->enable_parent = + param_values[Anum_pathman_config_params_enable_parent - 1]; } - - /* Else clear remaining cache entry */ + /* Else set default values if they cannot be found */ else { - remove_pathman_relation_info(relid); - prel = NULL; /* don't forget to reset 'prel' */ + prel->enable_parent = DEFAULT_PATHMAN_ENABLE_PARENT; } } - -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, - "dispatch_cache: fetching %s record for parent %u [%u]", - (prel ? 
"live" : "NULL"), relid, MyProcPid); -#endif - - /* Make sure that 'prel' is valid */ - Assert(!prel || PrelIsValid(prel)); - - return prel; -} - -/* Acquire lock on a table and try to get PartRelationInfo */ -const PartRelationInfo * -get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result) -{ - const PartRelationInfo *prel; - LockAcquireResult acquire_result; - - /* Restrict concurrent partition creation (it's dangerous) */ - acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - - /* Invalidate cache entry (see AcceptInvalidationMessages()) */ - invalidate_pathman_relation_info(relid, NULL); - - /* Set 'lock_result' if asked to */ - if (lock_result) - *lock_result = acquire_result; - - prel = get_pathman_relation_info(relid); - if (!prel && unlock_if_not_found) - UnlockRelationOid(relid, ShareUpdateExclusiveLock); - - return prel; -} - -/* Remove PartRelationInfo from local cache */ -void -remove_pathman_relation_info(Oid relid) -{ - bool found; - - /* Free resources */ - invalidate_pathman_relation_info(relid, &found); - - /* Now let's remove the entry completely */ - if (found) + PG_CATCH(); { - pathman_cache_search_relid(partitioned_rels, relid, HASH_REMOVE, NULL); + /* Free this entry */ + free_pathman_relation_info(prel); -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, - "dispatch_cache: removing record for parent %u [%u]", - relid, MyProcPid); -#endif + /* Rethrow ERROR further */ + PG_RE_THROW(); } -} + PG_END_TRY(); -static void -free_prel_partitions(PartRelationInfo *prel) -{ - /* Handle valid PartRelationInfo */ - if (PrelIsValid(prel)) + /* Free trivial entries */ + if (PrelChildrenCount(prel) == 0) { - /* Remove this parent from parents cache */ - ForgetParent(prel); - - /* Drop cached bounds etc */ - MemoryContextDelete(prel->mcxt); + free_pathman_relation_info(prel); + prel = NULL; } - /* Set important default values */ - if (prel) - { - prel->children = NULL; - prel->ranges = NULL; - prel->mcxt = NULL; + return prel; +} - prel->valid = false; /* now cache entry is invalid */ - } +/* Free PartRelationInfo struct safely */ +static void +free_pathman_relation_info(PartRelationInfo *prel) +{ + MemoryContextDelete(prel->mcxt); } /* Fill PartRelationInfo with partition-related info */ @@ -548,7 +347,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Create temporary memory context for loop */ temp_mcxt = AllocSetContextCreate(CurrentMemoryContext, CppAsString(fill_prel_with_partitions), - ALLOCSET_DEFAULT_SIZES); + ALLOCSET_SMALL_SIZES); /* Initialize bounds of partitions */ for (i = 0; i < PrelChildrenCount(prel); i++) @@ -570,13 +369,13 @@ fill_prel_with_partitions(PartRelationInfo *prel, switch (prel->parttype) { case PT_HASH: - prel->children[pbin->part_idx] = pbin->child_rel; + prel->children[pbin->part_idx] = pbin->child_relid; break; case PT_RANGE: { /* Copy child's Oid */ - prel->ranges[i].child_oid = pbin->child_rel; + prel->ranges[i].child_oid = pbin->child_relid; /* Copy all min & max Datums to the persistent mcxt */ old_mcxt = MemoryContextSwitchTo(prel->mcxt); @@ -640,7 +439,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, #endif } -/* qsort comparison function for RangeEntries */ +/* qsort() comparison function for RangeEntries */ static int cmp_range_entries(const void *p1, const void *p2, void *arg) { @@ -651,558 +450,351 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); } - /* - * Partitioning 
expression routines. + * Common PartRelationInfo checks. Emit ERROR if anything is wrong. */ - -/* Wraps expression in SELECT query and returns parse tree */ -Node * -parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, /* ret value #1 */ - Node **parsetree_out) /* ret value #2 */ +void +shout_if_prel_is_invalid(const Oid parent_oid, + const PartRelationInfo *prel, + const PartType expected_part_type) { - SelectStmt *select_stmt; - List *parsetree_list; - MemoryContext old_mcxt; - - const char *sql = "SELECT (%s) FROM ONLY %s.%s"; - char *relname = get_rel_name(relid), - *nspname = get_namespace_name(get_rel_namespace(relid)); - char *query_string = psprintf(sql, expr_cstr, - quote_identifier(nspname), - quote_identifier(relname)); - - old_mcxt = CurrentMemoryContext; + if (!prel) + elog(ERROR, "relation \"%s\" has no partitions", + get_rel_name_or_relid(parent_oid)); - PG_TRY(); - { - parsetree_list = raw_parser(query_string); - } - PG_CATCH(); + /* Check partitioning type unless it's "ANY" */ + if (expected_part_type != PT_ANY && + expected_part_type != prel->parttype) { - ErrorData *error; + char *expected_str; - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); + switch (expected_part_type) + { + case PT_HASH: + expected_str = "HASH"; + break; - /* Adjust error message */ - error->detail = error->message; - error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; + case PT_RANGE: + expected_str = "RANGE"; + break; - ReThrowError(error); - } - PG_END_TRY(); + default: + WrongPartType(expected_part_type); + expected_str = NULL; /* keep compiler happy */ + } - if (list_length(parsetree_list) != 1) - elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); + elog(ERROR, "relation \"%s\" is not partitioned by %s", + get_rel_name_or_relid(parent_oid), + expected_str); + } +} -#if PG_VERSION_NUM >= 100000 - select_stmt = (SelectStmt *) ((RawStmt *) linitial(parsetree_list))->stmt; -#else - select_stmt = (SelectStmt *) linitial(parsetree_list); -#endif +/* + * Remap partitioning expression columns for tuple source relation. + * This is a simplified version of functions that return TupleConversionMap. + * It should be faster if expression uses a few fields of relation. 
+ */ +AttrNumber * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length) +{ + Oid parent_relid = PrelParentRelid(prel); + int source_natts = source_tupdesc->natts, + expr_natts = 0; + AttrNumber *result, + i; + bool is_trivial = true; - if (query_string_out) - *query_string_out = query_string; + /* Get largest attribute number used in expression */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + expr_natts = i; - if (parsetree_out) - *parsetree_out = (Node *) linitial(parsetree_list); + /* Allocate array for map */ + result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); - return ((ResTarget *) linitial(select_stmt->targetList))->val; -} + /* Find a match for each attribute */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname(parent_relid, attnum); + int j; -/* Parse partitioning expression and return its type and nodeToString() as TEXT */ -Datum -cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type_out) /* ret value #1 */ -{ - Node *parse_tree; - List *query_tree_list; + Assert(attnum <= expr_natts); - char *query_string, - *expr_serialized = ""; /* keep compiler happy */ + for (j = 0; j < source_natts; j++) + { + Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); - Datum expr_datum; + if (att->attisdropped) + continue; /* attrMap[attnum - 1] is already 0 */ - MemoryContext parse_mcxt, - old_mcxt; + if (strcmp(NameStr(att->attname), attname) == 0) + { + result[attnum - 1] = (AttrNumber) (j + 1); + break; + } + } - AssertTemporaryContext(); + if (result[attnum - 1] == 0) + elog(ERROR, "cannot find column \"%s\" in child relation", attname); - /* - * We use separate memory context here, just to make sure we won't - * leave anything behind after parsing, rewriting and planning. - */ - parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, - CppAsString(cook_partitioning_expression), - ALLOCSET_DEFAULT_SIZES); + if (result[attnum - 1] != attnum) + is_trivial = false; + } - /* Switch to mcxt for cooking :) */ - old_mcxt = MemoryContextSwitchTo(parse_mcxt); + /* Check if map is trivial */ + if (is_trivial) + { + pfree(result); + return NULL; + } - /* First we have to build a raw AST */ - (void) parse_partitioning_expression(relid, expr_cstr, - &query_string, &parse_tree); + *map_length = expr_natts; + return result; +} - /* We don't need pg_pathman's magic here */ - pathman_hooks_enabled = false; - PG_TRY(); - { - Query *query; - Node *expr; - int expr_attr; - Relids expr_varnos; - Bitmapset *expr_varattnos = NULL; +/* + * Partitioning status cache routines. 
+ */ - /* This will fail with ERROR in case of wrong expression */ - query_tree_list = pg_analyze_and_rewrite_compat(parse_tree, query_string, - NULL, 0, NULL); +PartStatusInfo * +open_pathman_status_info(Oid relid) +{ + PartStatusInfo *psin; + bool found; + bool refresh; - /* Sanity check #1 */ - if (list_length(query_tree_list) != 1) - elog(ERROR, "partitioning expression produced more than 1 query"); + /* Should always be called in transaction */ + Assert(IsTransactionState()); - query = (Query *) linitial(query_tree_list); + /* We don't cache catalog objects */ + if (relid < FirstNormalObjectId) + return NULL; - /* Sanity check #2 */ - if (list_length(query->targetList) != 1) - elog(ERROR, "there should be exactly 1 partitioning expression"); + /* Create a new entry for this table if needed */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); - /* Sanity check #3 */ - if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) - elog(ERROR, "subqueries are not allowed in partitioning expression"); + /* Initialize new entry */ + if (!found) + { + psin->refcount = 0; + psin->is_valid = false; + psin->prel = NULL; + } - expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; - expr = eval_const_expressions(NULL, expr); + /* Should we refresh this entry? */ + refresh = !psin->is_valid && psin->refcount == 0; - /* Sanity check #4 */ - if (contain_mutable_functions(expr)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("functions in partitioning expression" - " must be marked IMMUTABLE"))); + if (refresh) + { + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; - /* Sanity check #5 */ - expr_varnos = pull_varnos(expr); - if (bms_num_members(expr_varnos) != 1 || - relid != ((RangeTblEntry *) linitial(query->rtable))->relid) + /* Set basic fields */ + psin->is_valid = false; + + /* Free old dispatch info */ + if (psin->prel) { - elog(ERROR, "partitioning expression should reference table \"%s\"", - get_rel_name(relid)); + free_pathman_relation_info(psin->prel); + psin->prel = NULL; } - /* Sanity check #6 */ - pull_varattnos(expr, bms_singleton_member(expr_varnos), &expr_varattnos); - expr_attr = -1; - while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) + /* Check if PATHMAN_CONFIG table contains this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) { - AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; - HeapTuple htup; - - /* Check that there's no system attributes in expression */ - if (attnum < InvalidAttrNumber) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("system attributes are not supported"))); - - htup = SearchSysCache2(ATTNUM, - ObjectIdGetDatum(relid), - Int16GetDatum(attnum)); - if (HeapTupleIsValid(htup)) - { - bool nullable; + bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - /* Fetch 'nullable' and free syscache tuple */ - nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; - ReleaseSysCache(htup); + if (upd_expr) + pathman_config_refresh_parsed_expression(relid, values, + isnull, &iptr); - if (nullable) - ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" should be marked NOT NULL", - get_attname(relid, attnum)))); - } + /* Build a partitioned table cache entry (might emit ERROR) */ + psin->prel = build_pathman_relation_info(relid, values); } - /* Free sets */ - bms_free(expr_varnos); - bms_free(expr_varattnos); - 
- Assert(expr); - expr_serialized = nodeToString(expr); - - /* Set 'expr_type_out' if needed */ - if (expr_type_out) - *expr_type_out = exprType(expr); + /* Good, entry is valid */ + psin->is_valid = true; } - PG_CATCH(); - { - ErrorData *error; - - /* Don't forget to enable pg_pathman's hooks */ - pathman_hooks_enabled = true; - - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); - - /* Adjust error message */ - error->detail = error->message; - error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; - ReThrowError(error); - } - PG_END_TRY(); + /* Increase refcount */ + psin->refcount++; - /* Don't forget to enable pg_pathman's hooks */ - pathman_hooks_enabled = true; + return psin; +} - /* Switch to previous mcxt */ - MemoryContextSwitchTo(old_mcxt); +void +close_pathman_status_info(PartStatusInfo *psin) +{ + /* Should always be called in transaction */ + Assert(IsTransactionState()); - /* Get Datum of serialized expression (right mcxt) */ - expr_datum = CStringGetTextDatum(expr_serialized); + /* Should not be NULL */ + Assert(psin); - /* Free memory */ - MemoryContextDelete(parse_mcxt); + /* Should be referenced elsewhere */ + Assert(psin->refcount > 0); - return expr_datum; + /* Decrease recount */ + psin->refcount--; } -/* Canonicalize user's expression (trim whitespaces etc) */ -char * -canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr) +void +invalidate_pathman_status_info(Oid relid) { - Node *parse_tree; - Expr *expr; - char *query_string; - Query *query; - - AssertTemporaryContext(); + PartStatusInfo *psin; - /* First we have to build a raw AST */ - (void) parse_partitioning_expression(relid, expr_cstr, - &query_string, &parse_tree); + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); - query = parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); - expr = ((TargetEntry *) linitial(query->targetList))->expr; + if (psin) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + relid, MyProcPid); +#endif - /* We don't care about memory efficiency here */ - return deparse_expression((Node *) expr, - deparse_context_for(get_rel_name(relid), relid), - false, false); + /* Mark entry as invalid */ + psin->is_valid = false; + } } -/* Check if query has subqueries */ -static bool -query_contains_subqueries(Node *node, void *context) +void +invalidate_pathman_status_info_cache(void) { - if (node == NULL) - return false; + HASH_SEQ_STATUS status; + PartStatusInfo *psin; - /* We've met a subquery */ - if (IsA(node, Query)) - return true; + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif - return expression_tree_walker(node, query_contains_subqueries, NULL); + /* Mark entry as invalid */ + psin->is_valid = false; + } } /* - * Functions for delayed invalidation. + * Partition bounds cache routines. 
*/ -/* Add new delayed pathman shutdown job (DROP EXTENSION) */ -void -delay_pathman_shutdown(void) -{ - delayed_shutdown = true; -} - -/* Add new delayed invalidation job for whole dispatch cache */ -void -delay_invalidation_whole_cache(void) -{ - /* Free useless invalidation lists */ - free_invalidation_lists(); - - delayed_invalidation_whole_cache = true; -} - -/* Generic wrapper for lists */ -static void -delay_invalidation_event(List **inval_list, Oid relation) -{ - /* Skip if we already need to drop whole cache */ - if (delayed_invalidation_whole_cache) - return; - - if (list_length(*inval_list) > INVAL_LIST_MAX_ITEMS) - { - /* Too many events, drop whole cache */ - delay_invalidation_whole_cache(); - return; - } - - list_add_unique(*inval_list, relation); -} - -/* Add new delayed invalidation job for a [ex-]parent relation */ -void -delay_invalidation_parent_rel(Oid parent) -{ - delay_invalidation_event(&delayed_invalidation_parent_rels, parent); -} - -/* Add new delayed invalidation job for a vague relation */ +/* Remove partition's constraint from cache */ void -delay_invalidation_vague_rel(Oid vague_rel) +forget_bounds_of_partition(Oid partition) { - delay_invalidation_event(&delayed_invalidation_vague_rels, vague_rel); -} + PartBoundInfo *pbin; -/* Finish all pending invalidation jobs if possible */ -void -finish_delayed_invalidation(void) -{ - /* Exit early if there's nothing to do */ - if (delayed_invalidation_whole_cache == false && - delayed_invalidation_parent_rels == NIL && - delayed_invalidation_vague_rels == NIL && - delayed_shutdown == false) - { - return; - } + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ - /* Check that current state is transactional */ - if (IsTransactionState()) + /* Free this entry */ + if (pbin) { - Oid *parents = NULL; - int parents_count = 0; - bool parents_fetched = false; - ListCell *lc; - - AcceptInvalidationMessages(); - - /* Handle the probable 'DROP EXTENSION' case */ - if (delayed_shutdown) - { - Oid cur_pathman_config_relid; - - /* Unset 'shutdown' flag */ - delayed_shutdown = false; - - /* Get current PATHMAN_CONFIG relid */ - cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, - get_pathman_schema()); - - /* Check that PATHMAN_CONFIG table has indeed been dropped */ - if (cur_pathman_config_relid == InvalidOid || - cur_pathman_config_relid != get_pathman_config_relid(true)) - { - /* Ok, let's unload pg_pathman's config */ - unload_config(); - - /* Disregard all remaining invalidation jobs */ - delayed_invalidation_whole_cache = false; - free_invalidation_lists(); - - /* No need to continue, exit */ - return; - } - } - - /* We might be asked to perform a complete cache invalidation */ - if (delayed_invalidation_whole_cache) - { - /* Unset 'invalidation_whole_cache' flag */ - delayed_invalidation_whole_cache = false; - - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - - /* Invalidate live entries and remove dead ones */ - invalidate_pathman_relation_info_cache(parents, parents_count); - } - - /* Process relations that are (or were) definitely partitioned */ - foreach (lc, delayed_invalidation_parent_rels) - { - Oid parent = lfirst_oid(lc); - - /* Skip if it's a TOAST table */ - if (IsToastNamespace(get_rel_namespace(parent))) - continue; - - /* Fetch all partitioned tables */ - if 
(!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - - /* Check if parent still exists */ - if (bsearch_oid(parent, parents, parents_count)) - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent, NULL); - else - remove_pathman_relation_info(parent); - } - - /* Process all other vague cases */ - foreach (lc, delayed_invalidation_vague_rels) + /* Call pfree() if it's RANGE bounds */ + if (pbin->parttype == PT_RANGE) { - Oid vague_rel = lfirst_oid(lc); - - /* Skip if it's a TOAST table */ - if (IsToastNamespace(get_rel_namespace(vague_rel))) - continue; - - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - - /* It might be a partitioned table or a partition */ - if (!try_invalidate_parent(vague_rel, parents, parents_count)) - { - PartParentSearch search; - Oid parent; - List *fresh_rels = delayed_invalidation_parent_rels; - - parent = get_parent_of_partition(vague_rel, &search); - - switch (search) - { - /* - * Two main cases: - * - It's *still* parent (in PATHMAN_CONFIG) - * - It *might have been* parent before (not in PATHMAN_CONFIG) - */ - case PPS_ENTRY_PART_PARENT: - case PPS_ENTRY_PARENT: - { - /* Skip if we've already refreshed this parent */ - if (!list_member_oid(fresh_rels, parent)) - try_invalidate_parent(parent, parents, parents_count); - } - break; - - /* How come we still don't know?? */ - case PPS_NOT_SURE: - elog(ERROR, "Unknown table status, this should never happen"); - break; - - default: - break; - } - } + FreeBound(&pbin->range_min, pbin->byval); + FreeBound(&pbin->range_max, pbin->byval); } - /* Finally, free invalidation jobs lists */ - free_invalidation_lists(); - - if (parents) - pfree(parents); + /* Finally remove this entry from cache */ + pathman_cache_search_relid(bounds_cache, + partition, + HASH_REMOVE, + NULL); } } +/* Return partition's constraint as expression tree */ +PartBoundInfo * +get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) +{ + PartBoundInfo *pbin; -/* - * cache\forget\get PartParentInfo functions. - */ + /* + * We might end up building the constraint + * tree that we wouldn't want to keep. + */ + AssertTemporaryContext(); -/* Create "partition+parent" pair in local cache */ -void -cache_parent_of_partition(Oid partition, Oid parent) -{ - PartParentInfo *ppar; + /* PartRelationInfo must be provided */ + Assert(prel != NULL); - ppar = pathman_cache_search_relid(parent_cache, - partition, - HASH_ENTER, - NULL); + /* Should always be called in transaction */ + Assert(IsTransactionState()); - ppar->child_rel = partition; - ppar->parent_rel = parent; -} + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? 
+ pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ -/* Remove "partition+parent" pair from cache & return parent's Oid */ -Oid -forget_parent_of_partition(Oid partition, PartParentSearch *status) -{ - return get_parent_of_partition_internal(partition, status, HASH_REMOVE); -} + /* Build new entry */ + if (!pbin) + { + PartBoundInfo pbin_local; + Expr *con_expr; -/* Return partition parent's Oid */ -Oid -get_parent_of_partition(Oid partition, PartParentSearch *status) -{ - return get_parent_of_partition_internal(partition, status, HASH_FIND); -} + /* Initialize other fields */ + pbin_local.child_relid = partition; + pbin_local.byval = prel->ev_byval; -/* Check that expression is equal to expression of some partitioned table */ -bool -is_equal_to_partitioning_expression(Oid relid, char *expression, - Oid value_type) -{ - const PartRelationInfo *prel; - char *cexpr; - Oid expr_type; + /* Try to build constraint's expression tree (may emit ERROR) */ + con_expr = get_partition_constraint_expr(partition); - /* - * Cook and get a canonicalized expression, - * we don't need a result of the cooking - */ - cook_partitioning_expression(relid, expression, &expr_type); - cexpr = canonicalize_partitioning_expression(relid, expression); + /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ + fill_pbin_with_bounds(&pbin_local, prel, con_expr); - prel = get_pathman_relation_info(relid); + /* We strive to delay the creation of cache's entry */ + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bounds_cache, + partition, + HASH_ENTER, + NULL) : + palloc(sizeof(PartBoundInfo)); - /* caller should have been check it already */ - Assert(prel != NULL); + /* Copy data from 'pbin_local' */ + memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); + } - return (getBaseType(expr_type) == value_type) && - (strcmp(cexpr, prel->expr_cstr) == 0); + return pbin; } /* Get lower bound of a partition */ Datum -get_lower_bound(Oid relid, Oid value_type) +get_lower_bound(Oid partition_relid, Oid value_type) { Oid parent_relid; Datum result; const PartRelationInfo *prel; - PartBoundInfo *pbin; - PartParentSearch parent_search; + const PartBoundInfo *pbin; - parent_relid = get_parent_of_partition(relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(relid)); + get_rel_name_or_relid(partition_relid)); prel = get_pathman_relation_info(parent_relid); - Assert(prel && prel->parttype == PT_RANGE); - pbin = get_bounds_of_partition(relid, prel); - Assert(prel != NULL); + pbin = get_bounds_of_partition(partition_relid, prel); if (IsInfinite(&pbin->range_min)) return PointerGetDatum(NULL); @@ -1216,23 +808,20 @@ get_lower_bound(Oid relid, Oid value_type) /* Get upper bound of a partition */ Datum -get_upper_bound(Oid relid, Oid value_type) +get_upper_bound(Oid partition_relid, Oid value_type) { Oid parent_relid; Datum result; const PartRelationInfo *prel; - PartBoundInfo *pbin; - PartParentSearch parent_search; + const PartBoundInfo *pbin; - parent_relid = get_parent_of_partition(relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(relid)); + get_rel_name_or_relid(partition_relid)); prel = 
get_pathman_relation_info(parent_relid); - Assert(prel && prel->parttype == PT_RANGE); - pbin = get_bounds_of_partition(relid, prel); - Assert(prel != NULL); + pbin = get_bounds_of_partition(partition_relid, prel); if (IsInfinite(&pbin->range_max)) return PointerGetDatum(NULL); @@ -1245,279 +834,73 @@ get_upper_bound(Oid relid, Oid value_type) } /* - * Get [and remove] "partition+parent" pair from cache, - * also check syscache if 'status' is provided. + * Get constraint expression tree of a partition. * - * "status == NULL" implies that we don't care about - * neither syscache nor PATHMAN_CONFIG table contents. + * build_check_constraint_name_internal() is used to build conname. */ -static Oid -get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action) +static Expr * +get_partition_constraint_expr(Oid partition) { - Oid parent; - PartParentInfo *ppar = pathman_cache_search_relid(parent_cache, - partition, - HASH_FIND, - NULL); + Oid conid; /* constraint Oid */ + char *conname; /* constraint name */ + HeapTuple con_tuple; + Datum conbin_datum; + bool conbin_isnull; + Expr *expr; /* expression tree for constraint */ - if (ppar) - { - if (status) *status = PPS_ENTRY_PART_PARENT; - parent = ppar->parent_rel; + conname = build_check_constraint_name_relid_internal(partition); + conid = get_relation_constraint_oid(partition, conname, true); - /* Remove entry if necessary */ - if (action == HASH_REMOVE) - pathman_cache_search_relid(parent_cache, partition, - HASH_REMOVE, NULL); + if (!OidIsValid(conid)) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("constraint \"%s\" of partition \"%s\" does not exist", + conname, get_rel_name_or_relid(partition)), + errhint(INIT_ERROR_HINT))); } - /* Try fetching parent from syscache if 'status' is provided */ - else if (status) - parent = try_catalog_parent_search(partition, status); - else - parent = InvalidOid; /* we don't have to set status */ - - return parent; -} -/* Try to find parent of a partition using catalog & PATHMAN_CONFIG */ -static Oid -try_catalog_parent_search(Oid partition, PartParentSearch *status) -{ - if (!IsTransactionState()) + con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); + conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, + Anum_pg_constraint_conbin, + &conbin_isnull); + if (conbin_isnull) { - /* We could not perform search */ - if (status) *status = PPS_NOT_SURE; + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(WARNING, + (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", + conname, get_rel_name_or_relid(partition)), + errhint(INIT_ERROR_HINT))); + pfree(conname); - return InvalidOid; + return NULL; /* could not parse */ } - else - { - Relation relation; - ScanKeyData key[1]; - SysScanDesc scan; - HeapTuple inheritsTuple; - Oid parent = InvalidOid; - - /* At first we assume parent does not exist (not a partition) */ - if (status) *status = PPS_ENTRY_NOT_FOUND; + pfree(conname); - relation = heap_open(InheritsRelationId, AccessShareLock); + /* Finally we get a constraint expression tree */ + expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); - ScanKeyInit(&key[0], - Anum_pg_inherits_inhrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(partition)); + /* Don't foreget to release syscache tuple */ + ReleaseSysCache(con_tuple); - scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, - true, NULL, 1, key); + return expr; +} - while ((inheritsTuple = 
systable_getnext(scan)) != NULL) - { - parent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; +/* Fill PartBoundInfo with bounds/hash */ +static void +fill_pbin_with_bounds(PartBoundInfo *pbin, + const PartRelationInfo *prel, + const Expr *constraint_expr) +{ + AssertTemporaryContext(); - /* - * NB: don't forget that 'inh' flag does not immediately - * mean that this is a pg_pathman's partition. It might - * be just a casual inheriting table. - */ - if (status) *status = PPS_ENTRY_PARENT; + /* Copy partitioning type to 'pbin' */ + pbin->parttype = prel->parttype; - /* Check that PATHMAN_CONFIG contains this table */ - if (pathman_config_contains_relation(parent, NULL, NULL, NULL, NULL)) - { - /* We've found the entry, update status */ - if (status) *status = PPS_ENTRY_PART_PARENT; - } - - break; /* there should be no more rows */ - } - - systable_endscan(scan); - heap_close(relation, AccessShareLock); - - return parent; - } -} - -/* Try to invalidate cache entry for relation 'parent' */ -static bool -try_invalidate_parent(Oid relid, Oid *parents, int parents_count) -{ - /* Check if this is a partitioned table */ - if (bsearch_oid(relid, parents, parents_count)) - { - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); - - /* Success */ - return true; - } - - /* Clear remaining cache entry */ - remove_pathman_relation_info(relid); - - /* Not a partitioned relation */ - return false; -} - - -/* - * forget\get constraint functions. - */ - -/* Remove partition's constraint from cache */ -void -forget_bounds_of_partition(Oid partition) -{ - PartBoundInfo *pbin; - - /* Should we search in bounds cache? */ - pbin = pg_pathman_enable_bounds_cache ? - pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL) : - NULL; /* don't even bother */ - - /* Free this entry */ - if (pbin) - { - /* Call pfree() if it's RANGE bounds */ - if (pbin->parttype == PT_RANGE) - { - FreeBound(&pbin->range_min, pbin->byval); - FreeBound(&pbin->range_max, pbin->byval); - } - - /* Finally remove this entry from cache */ - pathman_cache_search_relid(bound_cache, - partition, - HASH_REMOVE, - NULL); - } -} - -/* Return partition's constraint as expression tree */ -PartBoundInfo * -get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) -{ - PartBoundInfo *pbin; - - /* - * We might end up building the constraint - * tree that we wouldn't want to keep. - */ - AssertTemporaryContext(); - - /* Should we search in bounds cache? */ - pbin = pg_pathman_enable_bounds_cache ? - pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL) : - NULL; /* don't even bother */ - - /* Build new entry */ - if (!pbin) - { - PartBoundInfo pbin_local; - Expr *con_expr; - - /* Initialize other fields */ - pbin_local.child_rel = partition; - pbin_local.byval = prel->ev_byval; - - /* Try to build constraint's expression tree (may emit ERROR) */ - con_expr = get_partition_constraint_expr(partition); - - /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ - fill_pbin_with_bounds(&pbin_local, prel, con_expr); - - /* We strive to delay the creation of cache's entry */ - pbin = pg_pathman_enable_bounds_cache ? - pathman_cache_search_relid(bound_cache, - partition, - HASH_ENTER, - NULL) : - palloc(sizeof(PartBoundInfo)); - - /* Copy data from 'pbin_local' */ - memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); - } - - return pbin; -} - -/* - * Get constraint expression tree of a partition. 
- * - * build_check_constraint_name_internal() is used to build conname. - */ -static Expr * -get_partition_constraint_expr(Oid partition) -{ - Oid conid; /* constraint Oid */ - char *conname; /* constraint name */ - HeapTuple con_tuple; - Datum conbin_datum; - bool conbin_isnull; - Expr *expr; /* expression tree for constraint */ - - conname = build_check_constraint_name_relid_internal(partition); - conid = get_relation_constraint_oid(partition, conname, true); - - if (!OidIsValid(conid)) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("constraint \"%s\" of partition \"%s\" does not exist", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - } - - con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); - conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, - Anum_pg_constraint_conbin, - &conbin_isnull); - if (conbin_isnull) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(WARNING, - (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - pfree(conname); - - return NULL; /* could not parse */ - } - pfree(conname); - - /* Finally we get a constraint expression tree */ - expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); - - /* Don't foreget to release syscache tuple */ - ReleaseSysCache(con_tuple); - - return expr; -} - -/* Fill PartBoundInfo with bounds/hash */ -static void -fill_pbin_with_bounds(PartBoundInfo *pbin, - const PartRelationInfo *prel, - const Expr *constraint_expr) -{ - AssertTemporaryContext(); - - /* Copy partitioning type to 'pbin' */ - pbin->parttype = prel->parttype; - - /* Perform a partitioning_type-dependent task */ - switch (prel->parttype) - { - case PT_HASH: + /* Perform a partitioning_type-dependent task */ + switch (prel->parttype) + { + case PT_HASH: { if (!validate_hash_constraint(constraint_expr, prel, &pbin->part_idx)) @@ -1525,7 +908,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, (errmsg("wrong constraint format for HASH partition \"%s\"", - get_rel_name_or_relid(pbin->child_rel)), + get_rel_name_or_relid(pbin->child_relid)), errhint(INIT_ERROR_HINT))); } } @@ -1543,7 +926,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, MemoryContext old_mcxt; /* Switch to the persistent memory context */ - old_mcxt = MemoryContextSwitchTo(PathmanBoundCacheContext); + old_mcxt = MemoryContextSwitchTo(PathmanBoundsCacheContext); pbin->range_min = lower_null ? MakeBoundInf(MINUS_INFINITY) : @@ -1565,7 +948,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, (errmsg("wrong constraint format for RANGE partition \"%s\"", - get_rel_name_or_relid(pbin->child_rel)), + get_rel_name_or_relid(pbin->child_relid)), errhint(INIT_ERROR_HINT))); } } @@ -1582,113 +965,442 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, /* - * Common PartRelationInfo checks. Emit ERROR if anything is wrong. + * Partition parents cache routines. 
*/ + +/* Add parent of partition to cache */ void -shout_if_prel_is_invalid(const Oid parent_oid, - const PartRelationInfo *prel, - const PartType expected_part_type) +cache_parent_of_partition(Oid partition, Oid parent) { - if (!prel) - elog(ERROR, "relation \"%s\" has no partitions", - get_rel_name_or_relid(parent_oid)); + PartParentInfo *ppar; - if (!PrelIsValid(prel)) - elog(ERROR, "pg_pathman's cache contains invalid entry " - "for relation \"%s\" [%u]", - get_rel_name_or_relid(parent_oid), - MyProcPid); + /* Why would we want to call it not in transaction? */ + Assert(IsTransactionState()); - /* Check partitioning type unless it's "ANY" */ - if (expected_part_type != PT_ANY && - expected_part_type != prel->parttype) + /* Create a new cache entry */ + ppar = pathman_cache_search_relid(parents_cache, + partition, + HASH_ENTER, + NULL); + + /* Fill entry with parent */ + ppar->parent_relid = parent; +} + +/* Remove parent of partition from cache */ +void +forget_parent_of_partition(Oid partition) +{ + pathman_cache_search_relid(parents_cache, + partition, + HASH_REMOVE, + NULL); +} + +/* Return parent of partition */ +Oid +get_parent_of_partition(Oid partition) +{ + PartParentInfo *ppar; + + /* Should always be called in transaction */ + Assert(IsTransactionState()); + + /* We don't cache catalog objects */ + if (partition < FirstNormalObjectId) + return InvalidOid; + + ppar = pathman_cache_search_relid(parents_cache, + partition, + HASH_FIND, + NULL); + + /* Nice, we have a cached entry */ + if (ppar) { - char *expected_str; + return ppar->child_relid; + } + /* Bad luck, let's search in catalog */ + else + { + Relation relation; + ScanKeyData key[1]; + SysScanDesc scan; + HeapTuple htup; + Oid parent = InvalidOid; - switch (expected_part_type) + relation = heap_open(InheritsRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_inherits_inhrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partition)); + + scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, + true, NULL, 1, key); + + while ((htup = systable_getnext(scan)) != NULL) { - case PT_HASH: - expected_str = "HASH"; - break; + /* Extract parent from catalog tuple */ + Oid inhparent = ((Form_pg_inherits) GETSTRUCT(htup))->inhparent; - case PT_RANGE: - expected_str = "RANGE"; - break; + /* Check that PATHMAN_CONFIG contains this table */ + if (pathman_config_contains_relation(inhparent, NULL, NULL, NULL, NULL)) + { + /* We should return this parent */ + parent = inhparent; - default: - WrongPartType(expected_part_type); - expected_str = NULL; /* keep compiler happy */ + /* Now, let's cache this parent */ + cache_parent_of_partition(partition, parent); + } + + break; /* there should be no more rows */ } - elog(ERROR, "relation \"%s\" is not partitioned by %s", - get_rel_name_or_relid(parent_oid), - expected_str); + systable_endscan(scan); + heap_close(relation, AccessShareLock); + + return parent; } } + /* - * Remap partitioning expression columns for tuple source relation. - * This is a simplified version of functions that return TupleConversionMap. - * It should be faster if expression uses a few fields of relation. + * Partitioning expression routines. 
*/ -AttrNumber * -PrelExpressionAttributesMap(const PartRelationInfo *prel, - TupleDesc source_tupdesc, - int *map_length) -{ - Oid parent_relid = PrelParentRelid(prel); - int source_natts = source_tupdesc->natts, - expr_natts = 0; - AttrNumber *result, - i; - bool is_trivial = true; - /* Get largest attribute number used in expression */ - i = -1; - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) - expr_natts = i; +/* Wraps expression in SELECT query and returns parse tree */ +Node * +parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, /* ret value #1 */ + Node **parsetree_out) /* ret value #2 */ +{ + SelectStmt *select_stmt; + List *parsetree_list; + MemoryContext old_mcxt; - /* Allocate array for map */ - result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); + const char *sql = "SELECT (%s) FROM ONLY %s.%s"; + char *relname = get_rel_name(relid), + *nspname = get_namespace_name(get_rel_namespace(relid)); + char *query_string = psprintf(sql, expr_cstr, + quote_identifier(nspname), + quote_identifier(relname)); - /* Find a match for each attribute */ - i = -1; - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + old_mcxt = CurrentMemoryContext; + + PG_TRY(); { - AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(parent_relid, attnum); - int j; + parsetree_list = raw_parser(query_string); + } + PG_CATCH(); + { + ErrorData *error; - Assert(attnum <= expr_natts); + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); - for (j = 0; j < source_natts; j++) + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); + } + PG_END_TRY(); + + if (list_length(parsetree_list) != 1) + elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); + +#if PG_VERSION_NUM >= 100000 + select_stmt = (SelectStmt *) ((RawStmt *) linitial(parsetree_list))->stmt; +#else + select_stmt = (SelectStmt *) linitial(parsetree_list); +#endif + + if (query_string_out) + *query_string_out = query_string; + + if (parsetree_out) + *parsetree_out = (Node *) linitial(parsetree_list); + + return ((ResTarget *) linitial(select_stmt->targetList))->val; +} + +/* Parse partitioning expression and return its type and nodeToString() as TEXT */ +Datum +cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type_out) /* ret value #1 */ +{ + Node *parse_tree; + List *query_tree_list; + + char *query_string, + *expr_serialized = ""; /* keep compiler happy */ + + Datum expr_datum; + + MemoryContext parse_mcxt, + old_mcxt; + + AssertTemporaryContext(); + + /* + * We use separate memory context here, just to make sure we won't + * leave anything behind after parsing, rewriting and planning. 
+ */ + parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, + CppAsString(cook_partitioning_expression), + ALLOCSET_DEFAULT_SIZES); + + /* Switch to mcxt for cooking :) */ + old_mcxt = MemoryContextSwitchTo(parse_mcxt); + + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + /* We don't need pg_pathman's magic here */ + pathman_hooks_enabled = false; + + PG_TRY(); + { + Query *query; + Node *expr; + int expr_attr; + Relids expr_varnos; + Bitmapset *expr_varattnos = NULL; + + /* This will fail with ERROR in case of wrong expression */ + query_tree_list = pg_analyze_and_rewrite_compat(parse_tree, query_string, + NULL, 0, NULL); + + /* Sanity check #1 */ + if (list_length(query_tree_list) != 1) + elog(ERROR, "partitioning expression produced more than 1 query"); + + query = (Query *) linitial(query_tree_list); + + /* Sanity check #2 */ + if (list_length(query->targetList) != 1) + elog(ERROR, "there should be exactly 1 partitioning expression"); + + /* Sanity check #3 */ + if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) + elog(ERROR, "subqueries are not allowed in partitioning expression"); + + expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; + expr = eval_const_expressions(NULL, expr); + + /* Sanity check #4 */ + if (contain_mutable_functions(expr)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("functions in partitioning expression" + " must be marked IMMUTABLE"))); + + /* Sanity check #5 */ + expr_varnos = pull_varnos(expr); + if (bms_num_members(expr_varnos) != 1 || + relid != ((RangeTblEntry *) linitial(query->rtable))->relid) { - Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); + elog(ERROR, "partitioning expression should reference table \"%s\"", + get_rel_name(relid)); + } - if (att->attisdropped) - continue; /* attrMap[attnum - 1] is already 0 */ + /* Sanity check #6 */ + pull_varattnos(expr, bms_singleton_member(expr_varnos), &expr_varattnos); + expr_attr = -1; + while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) + { + AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; + HeapTuple htup; - if (strcmp(NameStr(att->attname), attname) == 0) + /* Check that there's no system attributes in expression */ + if (attnum < InvalidAttrNumber) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("system attributes are not supported"))); + + htup = SearchSysCache2(ATTNUM, + ObjectIdGetDatum(relid), + Int16GetDatum(attnum)); + if (HeapTupleIsValid(htup)) { - result[attnum - 1] = (AttrNumber) (j + 1); - break; + bool nullable; + + /* Fetch 'nullable' and free syscache tuple */ + nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; + ReleaseSysCache(htup); + + if (nullable) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" should be marked NOT NULL", + get_attname(relid, attnum)))); } } - if (result[attnum - 1] == 0) - elog(ERROR, "cannot find column \"%s\" in child relation", attname); + /* Free sets */ + bms_free(expr_varnos); + bms_free(expr_varattnos); - if (result[attnum - 1] != attnum) - is_trivial = false; - } + Assert(expr); + expr_serialized = nodeToString(expr); - /* Check if map is trivial */ - if (is_trivial) + /* Set 'expr_type_out' if needed */ + if (expr_type_out) + *expr_type_out = exprType(expr); + } + PG_CATCH(); { - pfree(result); - return NULL; + ErrorData *error; + + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled 
= true; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); } + PG_END_TRY(); - *map_length = expr_natts; - return result; + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; + + /* Switch to previous mcxt */ + MemoryContextSwitchTo(old_mcxt); + + /* Get Datum of serialized expression (right mcxt) */ + expr_datum = CStringGetTextDatum(expr_serialized); + + /* Free memory */ + MemoryContextDelete(parse_mcxt); + + return expr_datum; +} + +/* Canonicalize user's expression (trim whitespaces etc) */ +char * +canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr) +{ + Node *parse_tree; + Expr *expr; + char *query_string; + Query *query; + + AssertTemporaryContext(); + + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + query = parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); + expr = ((TargetEntry *) linitial(query->targetList))->expr; + + /* We don't care about memory efficiency here */ + return deparse_expression((Node *) expr, + deparse_context_for(get_rel_name(relid), relid), + false, false); +} + +/* Check that expression is equal to expression of some partitioned table */ +bool +is_equal_to_partitioning_expression(const Oid relid, + const char *expression, + const Oid value_type) +{ + const PartRelationInfo *prel; + char *cexpr; + Oid expr_type; + + /* + * Cook and get a canonicalized expression, + * we don't need a result of the cooking + */ + cook_partitioning_expression(relid, expression, &expr_type); + cexpr = canonicalize_partitioning_expression(relid, expression); + + prel = get_pathman_relation_info(relid); + Assert(prel); + + return (getBaseType(expr_type) == value_type) && + (strcmp(cexpr, prel->expr_cstr) == 0); +} + +/* Check if query has subqueries */ +static bool +query_contains_subqueries(Node *node, void *context) +{ + if (node == NULL) + return false; + + /* We've met a subquery */ + if (IsA(node, Query)) + return true; + + return expression_tree_walker(node, query_contains_subqueries, NULL); +} + + +/* + * Functions for delayed invalidation. 
+ */ + +/* Add new delayed pathman shutdown job (DROP EXTENSION) */ +void +delay_pathman_shutdown(void) +{ + delayed_shutdown = true; +} + +/* Finish all pending invalidation jobs if possible */ +void +finish_delayed_invalidation(void) +{ + /* Check that current state is transactional */ + if (IsTransactionState()) + { + AcceptInvalidationMessages(); + + /* Handle the probable 'DROP EXTENSION' case */ + if (delayed_shutdown) + { + Oid cur_pathman_config_relid; + + /* Unset 'shutdown' flag */ + delayed_shutdown = false; + + /* Get current PATHMAN_CONFIG relid */ + cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, + get_pathman_schema()); + + /* Check that PATHMAN_CONFIG table has indeed been dropped */ + if (cur_pathman_config_relid == InvalidOid || + cur_pathman_config_relid != get_pathman_config_relid(true)) + { + /* Ok, let's unload pg_pathman's config */ + unload_config(); + + /* No need to continue, exit */ + return; + } + } + + + } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2c8a7249..b08b53e1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -146,7 +146,6 @@ is_pathman_related_table_rename(Node *parsetree, RenameStmt *rename_stmt = (RenameStmt *) parsetree; Oid relation_oid, parent_relid; - PartParentSearch parent_search; const PartRelationInfo *prel; Assert(IsPathmanReady()); @@ -177,8 +176,7 @@ is_pathman_related_table_rename(Node *parsetree, } /* Assume it's a partition, fetch its parent */ - parent_relid = get_parent_of_partition(relation_oid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + if (!OidIsValid(parent_relid = get_parent_of_partition(relation_oid))) return false; /* Is parent partitioned? */ From 01d9ad605658713877f66b9bc3830c2094d29cdb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Nov 2017 17:15:48 +0300 Subject: [PATCH 190/528] move refcount etc into PartRelationInfo --- src/hooks.c | 4 +- src/include/relation_info.h | 33 ++--- src/init.c | 2 +- src/relation_info.c | 275 ++++++++++++++++++------------------ 4 files changed, 153 insertions(+), 161 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2a968683..d02ec265 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -822,7 +822,7 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Invalidation event for whole cache */ if (relid == InvalidOid) { - invalidate_pathman_status_info_cache(); + invalidate_pathman_relation_info_cache(); } /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ @@ -841,7 +841,7 @@ pathman_relcache_hook(Datum arg, Oid relid) forget_parent_of_partition(relid); /* Invalidate PartStatusInfo entry if needed */ - invalidate_pathman_status_info(relid); + invalidate_pathman_relation_info(relid); } } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 70f2eedc..1a07ba00 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -133,16 +133,14 @@ typedef struct */ typedef struct PartStatusInfo { - Oid relid; /* key */ - int32 refcount; /* reference counter */ - bool is_valid; /* is this entry fresh? */ + Oid relid; /* key */ struct PartRelationInfo *prel; } PartStatusInfo; /* * PartParentInfo * Cached parent of the specified partition. - * Allows us to quickly search for PartRelationInfo. + * Allows us to quickly search for parent PartRelationInfo. 
*/ typedef struct PartParentInfo { @@ -177,7 +175,9 @@ typedef struct PartBoundInfo */ typedef struct PartRelationInfo { - PartStatusInfo *psin; /* entry holding this prel */ + Oid relid; /* key */ + int32 refcount; /* reference counter */ + bool fresh; /* is this entry fresh? */ bool enable_parent; /* should plan include parent? */ @@ -214,7 +214,7 @@ typedef struct PartRelationInfo * PartRelationInfo field access macros & functions. */ -#define PrelParentRelid(prel) ( (prel)->psin->relid ) +#define PrelParentRelid(prel) ( (prel)->relid ) #define PrelGetChildrenArray(prel) ( (prel)->children ) @@ -222,6 +222,10 @@ typedef struct PartRelationInfo #define PrelChildrenCount(prel) ( (prel)->children_count ) +#define PrelReferenceCount(prel) ( (prel)->refcount ) + +#define PrelIsFresh(prel) ( (prel)->fresh ) + static inline uint32 PrelLastChild(const PartRelationInfo *prel) { @@ -265,14 +269,6 @@ AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length); -/* - * PartStatusInfo field access macros & functions. - */ - -#define PsinIsValid(psin) ( (psin)->is_valid ) - -#define PsinReferenceCount(psin) ( (psin)->refcount ) - /* PartType wrappers */ static inline void @@ -312,6 +308,9 @@ PartTypeToCString(PartType parttype) /* Dispatch cache */ void refresh_pathman_relation_info(Oid relid); +void invalidate_pathman_relation_info(Oid relid); +void invalidate_pathman_relation_info_cache(void); +void close_pathman_relation_info(PartRelationInfo *prel); const PartRelationInfo *get_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, bool unlock_if_not_found, @@ -321,12 +320,6 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, const PartType expected_part_type); -/* Status cache */ -PartStatusInfo *open_pathman_status_info(Oid relid); -void close_pathman_status_info(PartStatusInfo *psin); -void invalidate_pathman_status_info(Oid relid); -void invalidate_pathman_status_info_cache(void); - /* Bounds cache */ void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); diff --git a/src/init.c b/src/init.c index 545eb670..58479939 100644 --- a/src/init.c +++ b/src/init.c @@ -692,7 +692,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); - elog(DEBUG2, "PATHMAN_CONFIG table %s relation %u", + elog(DEBUG2, "PATHMAN_CONFIG %s relation %u", (contains_rel ? 
"contains" : "doesn't contain"), relid); return contains_rel; diff --git a/src/relation_info.c b/src/relation_info.c index 77a81fc0..9537f9a9 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -83,6 +83,7 @@ static bool delayed_shutdown = false; /* pathman was dropped */ bsearch((const void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) +static void invalidate_pathman_status_info(PartStatusInfo *psin); static PartRelationInfo *build_pathman_relation_info(Oid relid, Datum *values); static void free_pathman_relation_info(PartRelationInfo *prel); @@ -128,20 +129,147 @@ refresh_pathman_relation_info(Oid relid) } +/* TODO: comment */ +void +invalidate_pathman_relation_info(Oid relid) +{ + PartStatusInfo *psin; + + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + + if (psin) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + relid, MyProcPid); +#endif + + invalidate_pathman_status_info(psin); + } +} + +/* TODO: comment */ +void +invalidate_pathman_relation_info_cache(void) +{ + HASH_SEQ_STATUS status; + PartStatusInfo *psin; + + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif + + invalidate_pathman_status_info(psin); + } +} + +/* TODO: comment */ +static void +invalidate_pathman_status_info(PartStatusInfo *psin) +{ + /* Mark entry as invalid */ + if (psin->prel && PrelReferenceCount(psin->prel) > 0) + { + PrelIsFresh(psin->prel) = false; + } + else + { + (void) pathman_cache_search_relid(status_cache, + psin->relid, + HASH_REMOVE, + NULL); + } +} + +/* TODO: comment */ +void +close_pathman_relation_info(PartRelationInfo *prel) +{ + +} + /* Get PartRelationInfo from local cache */ const PartRelationInfo * get_pathman_relation_info(Oid relid) { - PartStatusInfo *psin = open_pathman_status_info(relid); - PartRelationInfo *prel = psin ? psin->prel : NULL; + PartStatusInfo *psin; + bool refresh; + + /* Should always be called in transaction */ + Assert(IsTransactionState()); + + /* We don't create entries for catalog */ + if (relid < FirstNormalObjectId) + return NULL; + + /* Create a new entry for this table if needed */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + + /* Should we build a new PartRelationInfo? */ + refresh = psin ? 
+ (psin->prel && + !PrelIsFresh(psin->prel) && + PrelReferenceCount(psin->prel) == 0) : + true; + + if (refresh) + { + PartRelationInfo *prel = NULL; + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + /* Check if PATHMAN_CONFIG table contains this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) + { + bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; + + /* Update pending partitioning expression */ + if (upd_expr) + pathman_config_refresh_parsed_expression(relid, values, + isnull, &iptr); + + /* Build a partitioned table cache entry (might emit ERROR) */ + prel = build_pathman_relation_info(relid, values); + } + + /* Create a new entry for this table if needed */ + if (!psin) + { + bool found; + + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); + Assert(!found); + } + /* Otherwise, free old entry */ + else if (psin->prel) + { + free_pathman_relation_info(psin->prel); + } + + /* Cache fresh entry */ + psin->prel = prel; + } #ifdef USE_RELINFO_LOGGING elog(DEBUG2, "fetching %s record for parent %u [%u]", - (prel ? "live" : "NULL"), relid, MyProcPid); + (psin->prel ? "live" : "NULL"), relid, MyProcPid); #endif - return prel; + if (psin->prel) + PrelReferenceCount(psin->prel) += 1; + + return psin->prel; } /* Acquire lock on a table and try to get PartRelationInfo */ @@ -156,9 +284,6 @@ get_pathman_relation_info_after_lock(Oid relid, /* Restrict concurrent partition creation (it's dangerous) */ acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - /* Invalidate cache entry (see AcceptInvalidationMessages()) */ - refresh_pathman_relation_info(relid); - /* Set 'lock_result' if asked to */ if (lock_result) *lock_result = acquire_result; @@ -170,7 +295,7 @@ get_pathman_relation_info_after_lock(Oid relid, return prel; } -/* Build a new PartRelationInfo for relation (might emit ERROR) */ +/* Build a new PartRelationInfo for partitioned relation */ static PartRelationInfo * build_pathman_relation_info(Oid relid, Datum *values) { @@ -198,7 +323,10 @@ build_pathman_relation_info(Oid relid, Datum *values) /* Create a new PartRelationInfo */ prel = MemoryContextAlloc(prel_mcxt, sizeof(PartRelationInfo)); - prel->mcxt = prel_mcxt; + prel->relid = relid; + prel->refcount = 0; + prel->fresh = true; + prel->mcxt = prel_mcxt; /* Memory leak protection */ PG_TRY(); @@ -557,135 +685,6 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, } -/* - * Partitioning status cache routines. - */ - -PartStatusInfo * -open_pathman_status_info(Oid relid) -{ - PartStatusInfo *psin; - bool found; - bool refresh; - - /* Should always be called in transaction */ - Assert(IsTransactionState()); - - /* We don't cache catalog objects */ - if (relid < FirstNormalObjectId) - return NULL; - - /* Create a new entry for this table if needed */ - psin = pathman_cache_search_relid(status_cache, - relid, HASH_ENTER, - &found); - - /* Initialize new entry */ - if (!found) - { - psin->refcount = 0; - psin->is_valid = false; - psin->prel = NULL; - } - - /* Should we refresh this entry? 
*/ - refresh = !psin->is_valid && psin->refcount == 0; - - if (refresh) - { - ItemPointerData iptr; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Set basic fields */ - psin->is_valid = false; - - /* Free old dispatch info */ - if (psin->prel) - { - free_pathman_relation_info(psin->prel); - psin->prel = NULL; - } - - /* Check if PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) - { - bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - - if (upd_expr) - pathman_config_refresh_parsed_expression(relid, values, - isnull, &iptr); - - /* Build a partitioned table cache entry (might emit ERROR) */ - psin->prel = build_pathman_relation_info(relid, values); - } - - /* Good, entry is valid */ - psin->is_valid = true; - } - - /* Increase refcount */ - psin->refcount++; - - return psin; -} - -void -close_pathman_status_info(PartStatusInfo *psin) -{ - /* Should always be called in transaction */ - Assert(IsTransactionState()); - - /* Should not be NULL */ - Assert(psin); - - /* Should be referenced elsewhere */ - Assert(psin->refcount > 0); - - /* Decrease recount */ - psin->refcount--; -} - -void -invalidate_pathman_status_info(Oid relid) -{ - PartStatusInfo *psin; - - psin = pathman_cache_search_relid(status_cache, - relid, HASH_FIND, - NULL); - - if (psin) - { -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, "invalidation message for relation %u [%u]", - relid, MyProcPid); -#endif - - /* Mark entry as invalid */ - psin->is_valid = false; - } -} - -void -invalidate_pathman_status_info_cache(void) -{ - HASH_SEQ_STATUS status; - PartStatusInfo *psin; - - while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) - { -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, "invalidation message for relation %u [%u]", - psin->relid, MyProcPid); -#endif - - /* Mark entry as invalid */ - psin->is_valid = false; - } -} - - /* * Partition bounds cache routines. 
*/ From b5d6405699223e9d931aff63dd621b5393d12c1c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Nov 2017 16:15:11 +0300 Subject: [PATCH 191/528] WIP refactoring, introduce has_pathman_relation_info() --- init.sql | 90 ++++++++++++++----------------------- range.sql | 43 +++++++++++++++++- src/hooks.c | 39 ++++++++-------- src/include/relation_info.h | 9 ++-- src/include/xact_handling.h | 2 - src/pathman_workers.c | 2 +- src/pl_funcs.c | 80 ++++++++++++++++----------------- src/relation_info.c | 32 +++++++++---- src/utility_stmt_hooking.c | 26 ++++++----- src/xact_handling.c | 20 --------- 10 files changed, 177 insertions(+), 166 deletions(-) diff --git a/init.sql b/init.sql index f54d48eb..001bb097 100644 --- a/init.sql +++ b/init.sql @@ -513,38 +513,6 @@ BEGIN END $$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( - parent_relid REGCLASS) -RETURNS TEXT AS $$ -DECLARE - seq_name TEXT; - -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); - - RETURN seq_name; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; /* mute NOTICE message */ - -CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( - parent_relid REGCLASS) -RETURNS VOID AS $$ -DECLARE - seq_name TEXT; - -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; /* mute NOTICE message */ - /* * Drop partitions. If delete_data set to TRUE, partitions * will be dropped with all the data. @@ -686,43 +654,51 @@ EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); /* - * Partitioning key. + * Get partitioning key. */ CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( - relid REGCLASS) + parent_relid REGCLASS) RETURNS TEXT AS $$ - SELECT expr FROM @extschema@.pathman_config WHERE partrel = relid; + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; $$ LANGUAGE sql STRICT; /* - * Partitioning key type. + * Get partitioning key type. */ CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( - relid REGCLASS) -RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; /* - * Partitioning type. + * Get partitioning type. */ CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( - relid REGCLASS) + parent_relid REGCLASS) RETURNS INT4 AS $$ - SELECT parttype FROM @extschema@.pathman_config WHERE partrel = relid; + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; $$ LANGUAGE sql STRICT; - /* * Get number of partitions managed by pg_pathman. */ CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( - parent_relid REGCLASS) -RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' -LANGUAGE C STRICT; + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; /* * Get parent of pg_pathman's partition. @@ -806,9 +782,9 @@ LANGUAGE C STRICT; * Add record to pathman_config (RANGE) and validate partitions. 
*/ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - expression TEXT, - range_interval TEXT) + parent_relid REGCLASS, + expression TEXT, + range_interval TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; @@ -816,8 +792,8 @@ LANGUAGE C; * Add record to pathman_config (HASH) and validate partitions. */ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - expression TEXT) + parent_relid REGCLASS, + expression TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; @@ -866,9 +842,9 @@ LANGUAGE C; * Get parent of pg_pathman's partition. */ CREATE OR REPLACE FUNCTION @extschema@.is_equal_to_partitioning_expression( - parent_relid REGCLASS, - expression TEXT, - value_type OID) + parent_relid REGCLASS, + expression TEXT, + value_type OID) RETURNS BOOL AS 'pg_pathman', 'is_equal_to_partitioning_expression_pl' LANGUAGE C STRICT; @@ -877,8 +853,8 @@ LANGUAGE C STRICT; * bound_value is used to determine the type of bound */ CREATE OR REPLACE FUNCTION @extschema@.get_lower_bound( - relid REGCLASS, - bound_value ANYELEMENT + relid REGCLASS, + bound_value ANYELEMENT ) RETURNS ANYELEMENT AS 'pg_pathman', 'get_lower_bound_pl' LANGUAGE C STRICT; @@ -887,8 +863,8 @@ LANGUAGE C STRICT; * Get upper bound of a partition */ CREATE OR REPLACE FUNCTION @extschema@.get_upper_bound( - relid REGCLASS, - bound_value ANYELEMENT + relid REGCLASS, + bound_value ANYELEMENT ) RETURNS ANYELEMENT AS 'pg_pathman', 'get_upper_bound_pl' LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index fa72df8d..8e64adb3 100644 --- a/range.sql +++ b/range.sql @@ -1020,6 +1020,44 @@ END $$ LANGUAGE plpgsql; +/* + * Create a naming sequence for partitioned table. + */ +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop a naming sequence for partitioned table. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + + /* * Merge multiple partitions. All data will be copied to the first one. * The rest of partitions will be dropped. @@ -1041,7 +1079,6 @@ CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; - CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( parent_relid REGCLASS, bounds ANYARRAY, @@ -1075,12 +1112,14 @@ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; +/* + * Generate a name for naming sequence. + */ CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' LANGUAGE C STRICT; - /* * Returns N-th range (as an array of two elements). 
*/ diff --git a/src/hooks.c b/src/hooks.c index d02ec265..e8a882d9 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -63,13 +63,13 @@ allow_star_schema_join(PlannerInfo *root, } -set_join_pathlist_hook_type set_join_pathlist_next = NULL; -set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; -planner_hook_type planner_hook_next = NULL; -post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; -shmem_startup_hook_type shmem_startup_hook_next = NULL; -ProcessUtility_hook_type process_utility_hook_next = NULL; -ExecutorRun_hook_type executor_run_hook_next = NULL; +set_join_pathlist_hook_type set_join_pathlist_next = NULL; +set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; +planner_hook_type planner_hook_next = NULL; +post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; +shmem_startup_hook_type shmem_startup_hook_next = NULL; +ProcessUtility_hook_type process_utility_hook_next = NULL; +ExecutorRun_hook_type executor_run_hook_next = NULL; /* Take care of joins */ @@ -101,7 +101,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; - /* We should only consider base relations */ + /* We should only consider base inner relations */ if (innerrel->reloptkind != RELOPT_BASEREL) return; @@ -113,9 +113,13 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (jointype == JOIN_FULL || jointype == JOIN_RIGHT) return; - /* Check that innerrel is a BASEREL with PartRelationInfo */ - if (innerrel->reloptkind != RELOPT_BASEREL || - !(inner_prel = get_pathman_relation_info(inner_rte->relid))) + /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, + inner_rte)) + return; + + /* Proceed iff relation 'innerrel' is partitioned */ + if ((inner_prel = get_pathman_relation_info(inner_rte->relid)) == NULL) return; /* @@ -142,7 +146,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, Oid outer_baserel = root->simple_rte_array[rti]->relid; /* Is it partitioned? */ - if (get_pathman_relation_info(outer_baserel)) + if (has_pathman_relation_info(outer_baserel)) count++; } @@ -153,11 +157,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, "of partitioned tables are not supported"))); } - /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, - inner_rte)) - return; - /* * These codes are used internally in the planner, but are not supported * by the executor (nor, indeed, by most of the planner). @@ -223,7 +222,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, Assert(outer); } - /* No way to do this in a parameterized inner path */ + /* No way to do this in a parameterized inner path */ if (saved_jointype == JOIN_UNIQUE_INNER) return; @@ -607,7 +606,7 @@ pathman_enable_assign_hook(bool newval, void *extra) /* * Planner hook. It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from - * handling that tables. + * handling those tables. */ PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) @@ -679,7 +678,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* * Post parse analysis hook. It makes sure the config is loaded before executing - * any statement, including utility commands + * any statement, including utility commands. 
*/ void pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 1a07ba00..e5503fe7 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -311,10 +311,11 @@ void refresh_pathman_relation_info(Oid relid); void invalidate_pathman_relation_info(Oid relid); void invalidate_pathman_relation_info_cache(void); void close_pathman_relation_info(PartRelationInfo *prel); -const PartRelationInfo *get_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result); +bool has_pathman_relation_info(Oid relid); +PartRelationInfo *get_pathman_relation_info(Oid relid); +PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, + bool unlock_if_not_found, + LockAcquireResult *lock_result); void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, diff --git a/src/include/xact_handling.h b/src/include/xact_handling.h index a762f197..fe9f976c 100644 --- a/src/include/xact_handling.h +++ b/src/include/xact_handling.h @@ -32,7 +32,5 @@ bool xact_is_set_stmt(Node *stmt, const char *name); bool xact_is_alter_pathman_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); -void prevent_data_modification_internal(Oid relid); - #endif /* XACT_HANDLING_H */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index db0e1da7..2db579b3 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -547,7 +547,7 @@ bgw_main_concurrent_part(Datum main_arg) } /* Make sure that relation has partitions */ - if (get_pathman_relation_info(part_slot->relid) == NULL) + if (!has_pathman_relation_info(part_slot->relid)) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 197c2347..35312ff9 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -42,9 +42,9 @@ /* Function declarations */ PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); +PG_FUNCTION_INFO_V1( get_partition_key_type_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); -PG_FUNCTION_INFO_V1( get_partition_key_type ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); PG_FUNCTION_INFO_V1( show_cache_stats_internal ); @@ -105,32 +105,36 @@ typedef struct */ /* - * Get number of relation's partitions managed by pg_pathman. + * Return parent of a specified partition. */ Datum -get_number_of_partitions_pl(PG_FUNCTION_ARGS) +get_parent_of_partition_pl(PG_FUNCTION_ARGS) { - Oid parent = PG_GETARG_OID(0); - const PartRelationInfo *prel; + Oid partition = PG_GETARG_OID(0), + parent = get_parent_of_partition(partition); - /* If we couldn't find PartRelationInfo, return 0 */ - if ((prel = get_pathman_relation_info(parent)) == NULL) - PG_RETURN_INT32(0); + if (OidIsValid(parent)) + PG_RETURN_OID(parent); - PG_RETURN_INT32(PrelChildrenCount(prel)); + PG_RETURN_NULL(); } /* - * Get parent of a specified partition. + * Return partition key type. 
*/ Datum -get_parent_of_partition_pl(PG_FUNCTION_ARGS) +get_partition_key_type_pl(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0), - parent = get_parent_of_partition(partition); + Oid relid = PG_GETARG_OID(0); + PartRelationInfo *prel; - if (OidIsValid(parent)) - PG_RETURN_OID(parent); + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + Oid result = prel->ev_type; + close_pathman_relation_info(prel); + + PG_RETURN_OID(result); + } PG_RETURN_NULL(); } @@ -151,7 +155,7 @@ is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) } /* - * Get min bound value for parent relation + * Get min bound value for parent relation. */ Datum get_lower_bound_pl(PG_FUNCTION_ARGS) @@ -163,7 +167,7 @@ get_lower_bound_pl(PG_FUNCTION_ARGS) } /* - * Get min bound value for parent relation + * Get min bound value for parent relation. */ Datum get_upper_bound_pl(PG_FUNCTION_ARGS) @@ -183,21 +187,6 @@ get_base_type_pl(PG_FUNCTION_ARGS) PG_RETURN_OID(getBaseType(PG_GETARG_OID(0))); } -/* - * Return partition key type. - */ -Datum -get_partition_key_type(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - const PartRelationInfo *prel; - - prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_ANY); - - PG_RETURN_OID(prel->ev_type); -} - /* * Return tablespace name of a specified relation. */ @@ -650,7 +639,6 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } - Datum is_tuple_convertible(PG_FUNCTION_ARGS) { @@ -685,13 +673,13 @@ is_tuple_convertible(PG_FUNCTION_ARGS) PG_RETURN_BOOL(res); } + /* * ------------------------ * Useful string builders * ------------------------ */ - Datum build_check_constraint_name(PG_FUNCTION_ARGS) { @@ -706,13 +694,13 @@ build_check_constraint_name(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } + /* * ------------------------ * Cache & config updates * ------------------------ */ - /* * Try to add previously partitioned table to PATHMAN_CONFIG. */ @@ -889,7 +877,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } - /* * Invalidate relcache to refresh PartRelationInfo. */ @@ -954,12 +941,12 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) */ /* - * Acquire appropriate lock on a partitioned relation. + * Prevent concurrent modifiction of partitioning schema. */ Datum prevent_part_modification(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid relid = PG_GETARG_OID(0); /* Lock partitioned relation till transaction's end */ LockRelationOid(relid, ShareUpdateExclusiveLock); @@ -973,7 +960,19 @@ prevent_part_modification(PG_FUNCTION_ARGS) Datum prevent_data_modification(PG_FUNCTION_ARGS) { - prevent_data_modification_internal(PG_GETARG_OID(0)); + Oid relid = PG_GETARG_OID(0); + + /* + * Check that isolation level is READ COMMITTED. + * Else we won't be able to see new rows + * which could slip through locks. 
+ */ + if (!xact_is_level_read_committed()) + ereport(ERROR, + (errmsg("Cannot perform blocking partitioning operation"), + errdetail("Expected READ COMMITTED isolation level"))); + + LockRelationOid(relid, AccessExclusiveLock); PG_RETURN_VOID(); } @@ -1126,6 +1125,7 @@ is_operator_supported(PG_FUNCTION_ARGS) PG_RETURN_BOOL(OidIsValid(opid)); } + /* * ------- * DEBUG @@ -1145,7 +1145,7 @@ debug_capture(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* NOTE: just in case */ +/* Return pg_pathman's shared library version */ Datum pathman_version(PG_FUNCTION_ARGS) { diff --git a/src/relation_info.c b/src/relation_info.c index 9537f9a9..030407c5 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -190,11 +190,27 @@ invalidate_pathman_status_info(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { + PrelReferenceCount(prel) -= 1; +} + +/* Check if relation is partitioned by pg_pathman */ +bool +has_pathman_relation_info(Oid relid) +{ + PartRelationInfo *prel; + + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + close_pathman_relation_info(prel); + return true; + } + + return false; } /* Get PartRelationInfo from local cache */ -const PartRelationInfo * +PartRelationInfo * get_pathman_relation_info(Oid relid) { PartStatusInfo *psin; @@ -273,13 +289,13 @@ get_pathman_relation_info(Oid relid) } /* Acquire lock on a table and try to get PartRelationInfo */ -const PartRelationInfo * +PartRelationInfo * get_pathman_relation_info_after_lock(Oid relid, bool unlock_if_not_found, LockAcquireResult *lock_result) { - const PartRelationInfo *prel; - LockAcquireResult acquire_result; + PartRelationInfo *prel; + LockAcquireResult acquire_result; /* Restrict concurrent partition creation (it's dangerous) */ acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); @@ -1297,10 +1313,10 @@ char * canonicalize_partitioning_expression(const Oid relid, const char *expr_cstr) { - Node *parse_tree; - Expr *expr; - char *query_string; - Query *query; + Node *parse_tree; + Expr *expr; + char *query_string; + Query *query; AssertTemporaryContext(); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index b08b53e1..60581ed9 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -105,7 +105,7 @@ is_pathman_related_copy(Node *parsetree) false); /* Check that relation is partitioned */ - if (get_pathman_relation_info(parent_relid)) + if (has_pathman_relation_info(parent_relid)) { ListCell *lc; @@ -143,10 +143,9 @@ is_pathman_related_table_rename(Node *parsetree, Oid *relation_oid_out, /* ret value #1 */ bool *is_parent_out) /* ret value #2 */ { - RenameStmt *rename_stmt = (RenameStmt *) parsetree; - Oid relation_oid, - parent_relid; - const PartRelationInfo *prel; + RenameStmt *rename_stmt = (RenameStmt *) parsetree; + Oid relation_oid, + parent_relid; Assert(IsPathmanReady()); @@ -166,7 +165,7 @@ is_pathman_related_table_rename(Node *parsetree, false); /* Assume it's a parent */ - if (get_pathman_relation_info(relation_oid)) + if (has_pathman_relation_info(relation_oid)) { if (relation_oid_out) *relation_oid_out = relation_oid; @@ -176,11 +175,12 @@ is_pathman_related_table_rename(Node *parsetree, } /* Assume it's a partition, fetch its parent */ - if (!OidIsValid(parent_relid = get_parent_of_partition(relation_oid))) + parent_relid = get_parent_of_partition(relation_oid); + if (!OidIsValid(parent_relid)) return false; /* Is parent partitioned? 
*/ - if ((prel = get_pathman_relation_info(parent_relid)) != NULL) + if (has_pathman_relation_info(parent_relid)) { if (relation_oid_out) *relation_oid_out = relation_oid; @@ -201,10 +201,10 @@ is_pathman_related_alter_column_type(Node *parsetree, AttrNumber *attr_number_out, PartType *part_type_out) { - AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; - ListCell *lc; - Oid parent_relid; - const PartRelationInfo *prel; + AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; + ListCell *lc; + Oid parent_relid; + PartRelationInfo *prel; Assert(IsPathmanReady()); @@ -226,6 +226,8 @@ is_pathman_related_alter_column_type(Node *parsetree, /* Return 'parent_relid' and 'prel->parttype' */ if (parent_relid_out) *parent_relid_out = parent_relid; if (part_type_out) *part_type_out = prel->parttype; + + close_pathman_relation_info(prel); } else return false; diff --git a/src/xact_handling.c b/src/xact_handling.c index c6696cce..ff22a040 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -212,23 +212,3 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid) SET_LOCKTAG_RELATION(*tag, dbid, relid); } - - -/* - * Lock relation exclusively & check for current isolation level. - */ -void -prevent_data_modification_internal(Oid relid) -{ - /* - * Check that isolation level is READ COMMITTED. - * Else we won't be able to see new rows - * which could slip through locks. - */ - if (!xact_is_level_read_committed()) - ereport(ERROR, - (errmsg("Cannot perform blocking partitioning operation"), - errdetail("Expected READ COMMITTED isolation level"))); - - LockRelationOid(relid, AccessExclusiveLock); -} From d1a20b69b0f9cadcac3ba1890246676a15e6c994 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Nov 2017 18:19:00 +0300 Subject: [PATCH 192/528] WIP huge refactoring in *.sql files, remove checks for subpartitions --- init.sql | 34 +---- range.sql | 256 ++++++------------------------------ src/include/relation_info.h | 6 - src/pl_funcs.c | 44 ------- src/relation_info.c | 86 +----------- 5 files changed, 42 insertions(+), 384 deletions(-) diff --git a/init.sql b/init.sql index 001bb097..12546cca 100644 --- a/init.sql +++ b/init.sql @@ -704,8 +704,7 @@ LANGUAGE sql STRICT; * Get parent of pg_pathman's partition. */ CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( - partition_relid REGCLASS, - raise_error BOOL DEFAULT TRUE) + partition_relid REGCLASS) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; @@ -838,37 +837,6 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; -/* - * Get parent of pg_pathman's partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.is_equal_to_partitioning_expression( - parent_relid REGCLASS, - expression TEXT, - value_type OID) -RETURNS BOOL AS 'pg_pathman', 'is_equal_to_partitioning_expression_pl' -LANGUAGE C STRICT; - -/* - * Get lower bound of a partitioned relation - * bound_value is used to determine the type of bound - */ -CREATE OR REPLACE FUNCTION @extschema@.get_lower_bound( - relid REGCLASS, - bound_value ANYELEMENT -) -RETURNS ANYELEMENT AS 'pg_pathman', 'get_lower_bound_pl' -LANGUAGE C STRICT; - -/* - * Get upper bound of a partition - */ -CREATE OR REPLACE FUNCTION @extschema@.get_upper_bound( - relid REGCLASS, - bound_value ANYELEMENT -) -RETURNS ANYELEMENT AS 'pg_pathman', 'get_upper_bound_pl' -LANGUAGE C STRICT; - /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. */ diff --git a/range.sql b/range.sql index 8e64adb3..dad82ff2 100644 --- a/range.sql +++ b/range.sql @@ -46,29 +46,6 @@ BEGIN END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION @extschema@.has_parent_partitioned_by_expression( - parent_relid REGCLASS, - expression TEXT, - expr_type REGTYPE) -RETURNS BOOL AS $$ -DECLARE - relid REGCLASS; - part_type INTEGER; -BEGIN - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - part_type := @extschema@.get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, expression, expr_type) - THEN - RETURN TRUE; - END IF; - END IF; - - RETURN FALSE; -END -$$ LANGUAGE plpgsql; - /* * Creates RANGE partitions for specified relation based on datetime attribute */ @@ -82,37 +59,17 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE rows_count BIGINT; - value_type REGTYPE; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; - lower_bound start_value%TYPE = NULL; - upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; + BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); - value_type := @extschema@.get_base_type(pg_typeof(start_value)); - - /* - * Check that we're trying to make subpartitions. - * If expressions are same then we set and use upper bound. - * We change start_value if it's greater than lower bound. 
- */ - IF @extschema@.has_parent_partitioned_by_expression(parent_relid, - expression, value_type) - THEN - lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); - upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); - IF lower_bound != start_value THEN - start_value := lower_bound; - RAISE WARNING '"start_value" was set to %', start_value; - END IF; - END IF; - IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; END IF; @@ -128,7 +85,6 @@ BEGIN p_count := 0; WHILE cur_value <= max_value - OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; @@ -140,36 +96,18 @@ BEGIN * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ + /* Compute right bound of partitioning through additions */ end_value := start_value; FOR i IN 1..p_count LOOP end_value := end_value + p_interval; - IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN - part_count := i; - IF end_value > upper_bound THEN - RAISE WARNING '"p_interval" is not multiple of range (%, %)', - start_value, end_value; - END IF; - IF p_count != part_count THEN - p_count := part_count; - RAISE NOTICE '"p_count" was limited to %', p_count; - END IF; - - /* we got our partitions count */ - EXIT; - END IF; END LOOP; /* Check boundaries */ - EXECUTE - format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', - parent_relid, - start_value, - end_value, - value_type::TEXT) - USING - expression; + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); END IF; /* Create sequence for child partitions names */ @@ -213,38 +151,18 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE - value_type REGTYPE; rows_count BIGINT; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; - lower_bound start_value%TYPE = NULL; - upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; + BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); - value_type := @extschema@.get_base_type(pg_typeof(start_value)); - - /* - * Check that we're trying to make subpartitions. - * If expressions are same then we set and use upper bound. - * We change start_value if it's greater than lower bound. 
- */ - IF @extschema@.has_parent_partitioned_by_expression(parent_relid, - expression, value_type) - THEN - lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); - upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); - IF lower_bound != start_value THEN - start_value := lower_bound; - RAISE WARNING '"start_value" was set to %', start_value; - END IF; - END IF; - IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; END IF; @@ -264,7 +182,6 @@ BEGIN p_count := 0; WHILE cur_value <= max_value - OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; @@ -276,28 +193,14 @@ BEGIN * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ + /* Compute right bound of partitioning through additions */ end_value := start_value; FOR i IN 1..p_count LOOP end_value := end_value + p_interval; - IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN - part_count := i; - IF end_value > upper_bound THEN - RAISE WARNING '"p_interval" is not multiple of range (%, %)', - start_value, end_value; - END IF; - IF p_count != part_count THEN - p_count := part_count; - RAISE NOTICE '"p_count" was limited to %', p_count; - END IF; - - /* we got our partitions count */ - EXIT; - END IF; END LOOP; - /* check boundaries */ + /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, start_value, @@ -346,7 +249,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; - part_bounds bounds%TYPE; BEGIN IF array_ndims(bounds) > 1 THEN @@ -361,26 +263,6 @@ BEGIN expression, partition_data); - /* - * Subpartitions checks, in array version of create_range_partitions - * we raise exception instead of notice - */ - IF @extschema@.has_parent_partitioned_by_expression(parent_relid, - expression, pg_typeof(bounds[1])) - THEN - part_bounds[1] := @extschema@.get_lower_bound(parent_relid, bounds[1]); - part_bounds[2] := @extschema@.get_upper_bound(parent_relid, bounds[1]); - IF part_bounds[1] != bounds[1] THEN - RAISE EXCEPTION 'Bounds should start from %', part_bounds[1]; - END IF; - END IF; - - IF part_bounds[2] IS NOT NULL AND - bounds[array_length(bounds, 1) - 1] > part_bounds[2] - THEN - RAISE EXCEPTION 'Lower bound of rightmost partition should be less than %', part_bounds[2]; - END IF; - /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, @@ -425,7 +307,6 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( RETURNS ANYARRAY AS $$ DECLARE parent_relid REGCLASS; - inhparent REGCLASS; part_type INTEGER; part_expr TEXT; part_expr_type REGTYPE; @@ -439,23 +320,22 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1') - USING partition_relid - INTO inhparent; - - if inhparent IS NOT NULL THEN - RAISE EXCEPTION 'could not split partition if it has children'; - END IF; - - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); + /* Acquire data modification lock (prevent further modifications) */ PERFORM 
@extschema@.prevent_data_modification(partition_relid); + /* Check that partition is not partitioned */ + if @extschema@.get_number_of_partitions(partition_relid) > 0 THEN + RAISE EXCEPTION 'cannot split partition that has children'; + END IF; + part_expr_type = @extschema@.get_partition_key_type(parent_relid); part_expr := @extschema@.get_partition_key(parent_relid); - part_type := @extschema@.get_partition_type(parent_relid); /* Check if this is a RANGE partition */ @@ -540,7 +420,7 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -571,26 +451,6 @@ BEGIN END $$ LANGUAGE plpgsql; - -/* - * NOTE: we need this function just to determine the type - * of "upper_bound" var - */ -CREATE OR REPLACE FUNCTION @extschema@.check_against_upper_bound_internal( - relid REGCLASS, - bound_value ANYELEMENT, - error_message TEXT) -RETURNS VOID AS $$ -DECLARE - upper_bound bound_value%TYPE; -BEGIN - upper_bound := get_upper_bound(relid, bound_value); - IF bound_value >= upper_bound THEN - RAISE EXCEPTION '%', error_message; - END IF; -END -$$ LANGUAGE plpgsql; - /* * Spawn logic for append_partition(). We have to * separate this in order to pass the 'p_range'. @@ -606,12 +466,10 @@ CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( tablespace TEXT DEFAULT NULL) RETURNS TEXT AS $$ DECLARE - relid REGCLASS; part_expr_type REGTYPE; part_name TEXT; v_args_format TEXT; - part_expr TEXT; - part_type INTEGER; + BEGIN IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN RAISE EXCEPTION 'cannot append to empty partitions set'; @@ -629,24 +487,6 @@ BEGIN RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; END IF; - /* - * In case a user has used same expression on two levels, we need to check - * that we've not reached upper bound of higher partitioned table - */ - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO part_expr; - - part_type := @extschema@.get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, part_expr, part_expr_type) - THEN - PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, - p_range[2], 'reached upper bound in the current level of subpartitions'); - END IF; - END IF; - IF @extschema@.is_date_type(p_atttype) THEN v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE @@ -684,7 +524,7 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -788,14 +628,14 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); IF start_value >= end_value THEN RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; END IF; - /* check range overlap */ + /* Check range overlap */ IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN PERFORM @extschema@.check_range_available(parent_relid, start_value, @@ -842,7 
+682,7 @@ BEGIN RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); IF NOT delete_data THEN @@ -886,16 +726,15 @@ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( RETURNS TEXT AS $$ DECLARE part_expr TEXT; - part_expr_type REGTYPE; part_type INTEGER; rel_persistence CHAR; v_init_callback REGPROCEDURE; - relid REGCLASS; + BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); /* Ignore temporary tables */ @@ -907,41 +746,28 @@ BEGIN partition_relid::TEXT; END IF; - /* check range overlap */ + /* Check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; - /* - * In case a user has used same expression on two levels, we need to check - * that we've not reached upper bound of higher partitioned table - */ - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - part_expr_type := @extschema@.get_partition_key_type(parent_relid); - SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO part_expr; - - part_type := @extschema@.get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, part_expr, part_expr_type) - THEN - PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, - start_value, '"start value" exceeds upper bound of the current level of subpartitions'); - END IF; - END IF; - - /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); - part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); IF part_expr IS NULL THEN RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + /* Set check constraint */ EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition_relid::TEXT, @@ -978,7 +804,6 @@ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; - inhparent REGCLASS; part_type INTEGER; BEGIN @@ -987,13 +812,8 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1') - USING partition_relid - INTO inhparent; - - if inhparent IS NOT NULL THEN - RAISE EXCEPTION 'could not detach partition if it has children'; - END IF; + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.prevent_data_modification(parent_relid); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index e5503fe7..cd262532 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -324,8 +324,6 @@ void 
shout_if_prel_is_invalid(const Oid parent_oid, /* Bounds cache */ void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); -Datum get_lower_bound(Oid partition_relid, Oid value_type); -Datum get_upper_bound(Oid partition_relid, Oid value_type); /* Parent cache */ void cache_parent_of_partition(Oid partition, Oid parent); @@ -345,10 +343,6 @@ Datum cook_partitioning_expression(const Oid relid, char *canonicalize_partitioning_expression(const Oid relid, const char *expr_cstr); -bool is_equal_to_partitioning_expression(const Oid relid, - const char *expression, - const Oid value_type); - /* Partitioning expression routines */ Node *parse_partitioning_expression(const Oid relid, const char *expr_cstr, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 35312ff9..07d0cfb3 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -72,11 +72,6 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( pathman_version ); -PG_FUNCTION_INFO_V1( get_lower_bound_pl ); -PG_FUNCTION_INFO_V1( get_upper_bound_pl ); -PG_FUNCTION_INFO_V1( is_equal_to_partitioning_expression_pl ); - - /* User context for function show_partition_list_internal() */ typedef struct { @@ -139,45 +134,6 @@ get_partition_key_type_pl(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } -/* - * Get parent of a specified partition. - */ -Datum -is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) -{ - bool result; - Oid parent_relid = PG_GETARG_OID(0); - char *expr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); - Oid value_type = PG_GETARG_OID(2); - - result = is_equal_to_partitioning_expression(parent_relid, expr, value_type); - PG_RETURN_BOOL(result); -} - -/* - * Get min bound value for parent relation. - */ -Datum -get_lower_bound_pl(PG_FUNCTION_ARGS) -{ - Oid partition_relid = PG_GETARG_OID(0); - Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - - PG_RETURN_POINTER(get_lower_bound(partition_relid, value_type)); -} - -/* - * Get min bound value for parent relation. - */ -Datum -get_upper_bound_pl(PG_FUNCTION_ARGS) -{ - Oid partition_relid = PG_GETARG_OID(0); - Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - - PG_RETURN_POINTER(get_upper_bound(partition_relid, value_type)); -} - /* * Extract basic type of a domain. 
*/ diff --git a/src/relation_info.c b/src/relation_info.c index 030407c5..df348914 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -179,6 +179,9 @@ invalidate_pathman_status_info(PartStatusInfo *psin) } else { + if (psin->prel) + free_pathman_relation_info(psin->prel); + (void) pathman_cache_search_relid(status_cache, psin->relid, HASH_REMOVE, @@ -266,11 +269,6 @@ get_pathman_relation_info(Oid relid) &found); Assert(!found); } - /* Otherwise, free old entry */ - else if (psin->prel) - { - free_pathman_relation_info(psin->prel); - } /* Cache fresh entry */ psin->prel = prel; @@ -794,60 +792,6 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) return pbin; } -/* Get lower bound of a partition */ -Datum -get_lower_bound(Oid partition_relid, Oid value_type) -{ - Oid parent_relid; - Datum result; - const PartRelationInfo *prel; - const PartBoundInfo *pbin; - - parent_relid = get_parent_of_partition(partition_relid); - if (!OidIsValid(parent_relid)) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)); - - prel = get_pathman_relation_info(parent_relid); - pbin = get_bounds_of_partition(partition_relid, prel); - - if (IsInfinite(&pbin->range_min)) - return PointerGetDatum(NULL); - - result = BoundGetValue(&pbin->range_min); - if (value_type != prel->ev_type) - result = perform_type_cast(result, prel->ev_type, value_type, NULL); - - return result; -} - -/* Get upper bound of a partition */ -Datum -get_upper_bound(Oid partition_relid, Oid value_type) -{ - Oid parent_relid; - Datum result; - const PartRelationInfo *prel; - const PartBoundInfo *pbin; - - parent_relid = get_parent_of_partition(partition_relid); - if (!OidIsValid(parent_relid)) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)); - - prel = get_pathman_relation_info(parent_relid); - pbin = get_bounds_of_partition(partition_relid, prel); - - if (IsInfinite(&pbin->range_max)) - return PointerGetDatum(NULL); - - result = BoundGetValue(&pbin->range_max); - if (value_type != prel->ev_type) - result = perform_type_cast(result, prel->ev_type, value_type, NULL); - - return result; -} - /* * Get constraint expression tree of a partition. 
* @@ -1333,30 +1277,6 @@ canonicalize_partitioning_expression(const Oid relid, false, false); } -/* Check that expression is equal to expression of some partitioned table */ -bool -is_equal_to_partitioning_expression(const Oid relid, - const char *expression, - const Oid value_type) -{ - const PartRelationInfo *prel; - char *cexpr; - Oid expr_type; - - /* - * Cook and get a canonicalized expression, - * we don't need a result of the cooking - */ - cook_partitioning_expression(relid, expression, &expr_type); - cexpr = canonicalize_partitioning_expression(relid, expression); - - prel = get_pathman_relation_info(relid); - Assert(prel); - - return (getBaseType(expr_type) == value_type) && - (strcmp(cexpr, prel->expr_cstr) == 0); -} - /* Check if query has subqueries */ static bool query_contains_subqueries(Node *node, void *context) From 8a4698ca93190c8ec94333e9e02c9ce1e143fdc5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Nov 2017 14:58:46 +0300 Subject: [PATCH 193/528] WIP refactoring, fix functions close_pathman_relation_info() & drop_range_partition_expand_next() --- src/include/partition_creation.h | 10 --- src/include/relation_info.h | 10 +++ src/include/utils.h | 3 +- src/partition_creation.c | 139 ++++++++++++------------------- src/pl_funcs.c | 8 +- src/pl_hash_funcs.c | 2 +- src/pl_range_funcs.c | 136 ++++++++++++++++-------------- src/relation_info.c | 7 +- src/utility_stmt_hooking.c | 1 + src/utils.c | 1 + 10 files changed, 148 insertions(+), 169 deletions(-) diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index c0dd91e6..63768a95 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -81,16 +81,6 @@ void drop_pathman_check_constraint(Oid relid); void add_pathman_check_constraint(Oid relid, Constraint *constraint); -/* Update triggers */ -void create_single_update_trigger_internal(Oid partition_relid, - const char *trigname, - List *columns); - -bool has_update_trigger_internal(Oid parent); - -void drop_single_update_trigger_internal(Oid relid, - const char *trigname); - /* Partitioning callback type */ typedef enum { diff --git a/src/include/relation_info.h b/src/include/relation_info.h index cd262532..34d88f0c 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -12,6 +12,8 @@ #define RELATION_INFO_H +#include "utils.h" + #include "postgres.h" #include "access/attnum.h" #include "access/sysattr.h" @@ -89,6 +91,14 @@ FreeBound(Bound *bound, bool byval) pfree(DatumGetPointer(BoundGetValue(bound))); } +static inline char * +BoundToCString(const Bound *bound, Oid value_type) +{ + return IsInfinite(bound) ? + pstrdup("NULL") : + datum_to_cstring(bound->value, value_type); +} + static inline int cmp_bounds(FmgrInfo *cmp_func, const Oid collid, diff --git a/src/include/utils.h b/src/include/utils.h index 8fccded1..b45ed1db 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -12,10 +12,9 @@ #define PATHMAN_UTILS_H -#include "pathman.h" - #include "postgres.h" #include "parser/parse_oper.h" +#include "fmgr.h" /* diff --git a/src/partition_creation.c b/src/partition_creation.c index e1c1f1bb..05a6f508 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -334,16 +334,16 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, PG_TRY(); { - const PartRelationInfo *prel; - LockAcquireResult lock_result; /* could we lock the parent? 
*/ - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + LockAcquireResult lock_result; /* could we lock the parent? */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) { - Oid base_bound_type; /* base type of prel->ev_type */ - Oid base_value_type; /* base type of value_type */ + PartRelationInfo *prel; + Oid base_bound_type; /* base type of prel->ev_type */ + Oid base_value_type; /* base type of value_type */ /* Fetch PartRelationInfo by 'relid' */ prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); @@ -426,6 +426,9 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, value, base_value_type, prel->ev_collid); } + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); } else elog(ERROR, "table \"%s\" is not partitioned", @@ -1356,56 +1359,57 @@ check_range_available(Oid parent_relid, Oid value_type, bool raise_error) { - const PartRelationInfo *prel; - RangeEntry *ranges; - FmgrInfo cmp_func; - uint32 i; + PartRelationInfo *prel; + bool result = true; /* Try fetching the PartRelationInfo structure */ - prel = get_pathman_relation_info(parent_relid); - - /* If there's no prel, return TRUE (overlap is not possible) */ - if (!prel) + if ((prel = get_pathman_relation_info(parent_relid)) != NULL) { - ereport(WARNING, (errmsg("table \"%s\" is not partitioned", - get_rel_name_or_relid(parent_relid)))); - return true; - } + RangeEntry *ranges; + FmgrInfo cmp_func; + uint32 i; - /* Emit an error if it is not partitioned by RANGE */ - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + /* Emit an error if it is not partitioned by RANGE */ + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - /* Fetch comparison function */ - fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(value_type), - getBaseType(prel->ev_type)); + /* Fetch comparison function */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(value_type), + getBaseType(prel->ev_type)); - ranges = PrelGetRangesArray(prel); - for (i = 0; i < PrelChildrenCount(prel); i++) - { - int c1, c2; + ranges = PrelGetRangesArray(prel); + for (i = 0; i < PrelChildrenCount(prel); i++) + { + int c1, c2; - c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); - c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); + c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); + c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); - /* There's something! */ - if (c1 < 0 && c2 > 0) - { - if (raise_error) - elog(ERROR, "specified range [%s, %s) overlaps " - "with existing partitions", - IsInfinite(start) ? - "NULL" : - datum_to_cstring(BoundGetValue(start), value_type), - IsInfinite(end) ? - "NULL" : - datum_to_cstring(BoundGetValue(end), value_type)); - - else return false; + /* There's something! */ + if (c1 < 0 && c2 > 0) + { + if (raise_error) + { + elog(ERROR, "specified range [%s, %s) overlaps " + "with existing partitions", + BoundToCString(start, value_type), + BoundToCString(end, value_type)); + } + /* Too bad, so sad */ + else result = false; + } } + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + } + else + { + ereport(WARNING, (errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); } - return true; + return result; } /* Build HASH check constraint expression tree */ @@ -1669,15 +1673,15 @@ invoke_init_callback_internal(init_callback_params *cb_params) *end_value = NULL; Bound sv_datum = cb_params->params.range_params.start_value, ev_datum = cb_params->params.range_params.end_value; - Oid type = cb_params->params.range_params.value_type; + Oid value_type = cb_params->params.range_params.value_type; /* Convert min to CSTRING */ if (!IsInfinite(&sv_datum)) - start_value = datum_to_cstring(BoundGetValue(&sv_datum), type); + start_value = BoundToCString(&sv_datum, value_type); /* Convert max to CSTRING */ if (!IsInfinite(&ev_datum)) - end_value = datum_to_cstring(BoundGetValue(&ev_datum), type); + end_value = BoundToCString(&ev_datum, value_type); pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); @@ -1861,42 +1865,3 @@ build_partitioning_expression(Oid parent_relid, return expr; } - -/* - * ------------------------- - * Update triggers management - * ------------------------- - */ - -/* Create trigger for partition */ -void -create_single_update_trigger_internal(Oid partition_relid, - const char *trigname, - List *columns) -{ - CreateTrigStmt *stmt; - List *func; - - func = list_make2(makeString(get_namespace_name(get_pathman_schema())), - makeString(CppAsString(pathman_update_trigger_func))); - - stmt = makeNode(CreateTrigStmt); - stmt->trigname = (char *) trigname; - stmt->relation = makeRangeVarFromRelid(partition_relid); - stmt->funcname = func; - stmt->args = NIL; - stmt->row = true; - stmt->timing = TRIGGER_TYPE_BEFORE; - stmt->events = TRIGGER_TYPE_UPDATE; - stmt->columns = columns; - stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = false; - stmt->constrrel = NULL; - - (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, - InvalidOid, InvalidOid, false); - - CommandCounterIncrement(); -} diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 07d0cfb3..ef2288f3 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -437,8 +437,8 @@ show_partition_list_internal(PG_FUNCTION_ARGS) if (!IsInfinite(&re->min)) { Datum rmin = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->min), - prel->ev_type)); + BoundToCString(&re->min, + prel->ev_type)); values[Anum_pathman_pl_range_min - 1] = rmin; } @@ -448,8 +448,8 @@ show_partition_list_internal(PG_FUNCTION_ARGS) if (!IsInfinite(&re->max)) { Datum rmax = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->max), - prel->ev_type)); + BoundToCString(&re->max, + prel->ev_type)); values[Anum_pathman_pl_range_max - 1] = rmax; } diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 4f4238f5..f4a44b71 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -54,7 +54,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) RangeVar **rangevars = NULL; /* Check that there's no partitions yet */ - if (get_pathman_relation_info(parent_relid)) + if (has_pathman_relation_info(parent_relid)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot add new HASH partitions"))); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 93a78241..007a2937 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -72,7 +72,6 @@ static void modify_range_constraint(Oid partition_relid, Oid expression_type, const Bound *lower, const Bound *upper); -static void drop_table_by_oid(Oid 
relid); static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); @@ -710,11 +709,13 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Look for the specified partition */ for (j = 0; j < PrelChildrenCount(prel); j++) + { if (ranges[j].child_oid == parts[i]) { rentry_list = lappend(rentry_list, &ranges[j]); break; } + } } /* Check that partitions are adjacent */ @@ -765,67 +766,97 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Drop obsolete partitions */ for (i = 1; i < nparts; i++) - drop_table_by_oid(parts[i]); + { + ObjectAddress object; + + ObjectAddressSet(object, RelationRelationId, parts[i]); + performDeletion(&object, DROP_CASCADE, 0); + } } /* * Drops partition and expands the next partition - * so that it could cover the dropped one + * so that it could cover the dropped one. * - * This function was written in order to support Oracle-like ALTER TABLE ... - * DROP PARTITION. In Oracle partitions only have upper bound and when - * partition is dropped the next one automatically covers freed range + * This function was written in order to support + * Oracle-like ALTER TABLE ... DROP PARTITION. + * + * In Oracle partitions only have upper bound and when partition + * is dropped the next one automatically covers freed range. */ Datum drop_range_partition_expand_next(PG_FUNCTION_ARGS) { - const PartRelationInfo *prel; - Oid relid = PG_GETARG_OID(0), - parent; - RangeEntry *ranges; - int i; + Oid partition = PG_GETARG_OID(0), + parent; + PartRelationInfo *prel; + + /* Lock the partition we're going to drop */ + LockRelationOid(partition, AccessExclusiveLock); + + /* Check if partition exists */ + if (!SearchSysCacheExists1(RELOID, partition)) + elog(ERROR, "relation %u does not exist", partition); /* Get parent's relid */ - parent = get_parent_of_partition(relid); + parent = get_parent_of_partition(partition); if (!OidIsValid(parent)) elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(relid)); + get_rel_name(partition)); - /* Fetch PartRelationInfo and perform some checks */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_RANGE); + if ((prel = get_pathman_relation_info(parent)) != NULL) + { + ObjectAddress object; + RangeEntry *ranges; + int i; - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); + /* Emit an error if it is not partitioned by RANGE */ + shout_if_prel_is_invalid(parent, prel, PT_RANGE); - /* Looking for partition in child relations */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == relid) - break; + /* Fetch ranges array */ + ranges = PrelGetRangesArray(prel); - /* - * It must be in ranges array because we already - * know that this table is a partition - */ - Assert(i < PrelChildrenCount(prel)); + /* Looking for partition in child relations */ + for (i = 0; i < PrelChildrenCount(prel); i++) + if (ranges[i].child_oid == partition) + break; - /* Expand next partition if it exists */ - if (i < PrelChildrenCount(prel) - 1) - { - RangeEntry *cur = &ranges[i], - *next = &ranges[i + 1]; - - /* Drop old constraint and create a new one */ - modify_range_constraint(next->child_oid, - prel->expr_cstr, - prel->ev_type, - &cur->min, - &next->max); - } + /* Should have found it */ + Assert(i < PrelChildrenCount(prel)); + + /* Expand next partition if it exists */ + if (i < PrelChildrenCount(prel) - 1) + { + RangeEntry *cur = &ranges[i], + *next = &ranges[i + 1]; + Oid next_partition = next->child_oid; + 
LOCKMODE lockmode = AccessExclusiveLock; + + /* Lock next partition */ + LockRelationOid(next_partition, lockmode); + + /* Does next partition exist? */ + if (SearchSysCacheExists1(RELOID, next_partition)) + { + /* Stretch next partition to cover range */ + modify_range_constraint(next_partition, + prel->expr_cstr, + prel->ev_type, + &cur->min, + &next->max); + } + /* Bad luck, unlock missing partition */ + else UnlockRelationOid(next_partition, lockmode); + } - /* Finally drop this partition */ - drop_table_by_oid(relid); + /* Drop partition */ + ObjectAddressSet(object, RelationRelationId, partition); + performDeletion(&object, DROP_CASCADE, 0); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + } PG_RETURN_VOID(); } @@ -1226,24 +1257,3 @@ check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) last = cur; } } - -/* - * Drop table using it's Oid - */ -static void -drop_table_by_oid(Oid relid) -{ - DropStmt *n = makeNode(DropStmt); - const char *relname = get_qualified_rel_name(relid); - - n->removeType = OBJECT_TABLE; - n->missing_ok = false; - n->objects = list_make1(stringToQualifiedNameList(relname)); -#if PG_VERSION_NUM < 100000 - n->arguments = NIL; -#endif - n->behavior = DROP_RESTRICT; /* default behavior */ - n->concurrent = false; - - RemoveRelations(n); -} diff --git a/src/relation_info.c b/src/relation_info.c index df348914..44635ebf 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -193,6 +193,9 @@ invalidate_pathman_status_info(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + PrelReferenceCount(prel) -= 1; } @@ -977,7 +980,7 @@ get_parent_of_partition(Oid partition) /* Nice, we have a cached entry */ if (ppar) { - return ppar->child_relid; + return ppar->parent_relid; } /* Bad luck, let's search in catalog */ else @@ -1115,7 +1118,7 @@ cook_partitioning_expression(const Oid relid, */ parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, CppAsString(cook_partitioning_expression), - ALLOCSET_DEFAULT_SIZES); + ALLOCSET_SMALL_SIZES); /* Switch to mcxt for cooking :) */ old_mcxt = MemoryContextSwitchTo(parse_mcxt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 60581ed9..1f376c20 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -26,6 +26,7 @@ #include "commands/tablecmds.h" #include "foreign/fdwapi.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" diff --git a/src/utils.c b/src/utils.c index ba4a754f..da0d314b 100644 --- a/src/utils.c +++ b/src/utils.c @@ -10,6 +10,7 @@ * ------------------------------------------------------------------------ */ +#include "pathman.h" #include "utils.h" #include "access/htup_details.h" From f81fcf7211dddcf80928a01ea56a827f4548ab28 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Nov 2017 19:48:19 +0300 Subject: [PATCH 194/528] WIP simplified find_deepest_partition(), make use of has_pathman_relation_info() --- src/planner_tree_modification.c | 184 ++++++++++++++------------------ 1 file changed, 78 insertions(+), 106 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9e6d64e1..7b465dee 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -98,15 +98,6 @@ typedef struct } transform_query_cxt; - -typedef enum -{ - FP_FOUND, /* Found partition */ - 
diff --git a/src/relation_info.c b/src/relation_info.c
index df348914..44635ebf 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -193,6 +193,9 @@ invalidate_pathman_status_info(PartStatusInfo *psin)
 void
 close_pathman_relation_info(PartRelationInfo *prel)
 {
+	/* Check that refcount is valid */
+	Assert(PrelReferenceCount(prel) > 0);
+
 	PrelReferenceCount(prel) -= 1;
 }
 
@@ -977,7 +980,7 @@ get_parent_of_partition(Oid partition)
 	/* Nice, we have a cached entry */
 	if (ppar)
 	{
-		return ppar->child_relid;
+		return ppar->parent_relid;
 	}
 	/* Bad luck, let's search in catalog */
 	else
@@ -1115,7 +1118,7 @@ cook_partitioning_expression(const Oid relid,
 	 */
 	parse_mcxt = AllocSetContextCreate(CurrentMemoryContext,
 									   CppAsString(cook_partitioning_expression),
-									   ALLOCSET_DEFAULT_SIZES);
+									   ALLOCSET_SMALL_SIZES);
 
 	/* Switch to mcxt for cooking :) */
 	old_mcxt = MemoryContextSwitchTo(parse_mcxt);
diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index 60581ed9..1f376c20 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -26,6 +26,7 @@
 #include "commands/tablecmds.h"
 #include "foreign/fdwapi.h"
 #include "miscadmin.h"
+#include "nodes/makefuncs.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
diff --git a/src/utils.c b/src/utils.c
index ba4a754f..da0d314b 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -10,6 +10,7 @@
  * ------------------------------------------------------------------------
  */
 
+#include "pathman.h"
 #include "utils.h"
 
 #include "access/htup_details.h"

From f81fcf7211dddcf80928a01ea56a827f4548ab28 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 14 Nov 2017 19:48:19 +0300
Subject: [PATCH 194/528] WIP simplified find_deepest_partition(), make use of
 has_pathman_relation_info()

---
 src/planner_tree_modification.c | 184 ++++++++++++++------------------
 1 file changed, 78 insertions(+), 106 deletions(-)

diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index 9e6d64e1..7b465dee 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -98,15 +98,6 @@ typedef struct
 } transform_query_cxt;
 
-
-typedef enum
-{
-	FP_FOUND,				/* Found partition */
-	FP_PLAIN_TABLE,			/* Table isn't partitioned by pg_pathman */
-	FP_NON_SINGULAR_RESULT	/* Multiple or no partitions */
-} FindPartitionResult;
-
-
 static bool pathman_transform_query_walker(Node *node, void *context);
 
 static void disable_standard_inheritance(Query *parse, transform_query_cxt *context);
@@ -117,8 +108,7 @@ static void partition_router_visitor(Plan *plan, void *context);
 
 static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag);
 
-static FindPartitionResult find_deepest_partition(Oid relid, Index idx,
-												  Expr *quals, Oid *partition);
+static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals);
 
 static Node *eval_extern_params_mutator(Node *node, ParamListInfo params);
 
 static bool modifytable_contains_fdw(List *rtable, ModifyTable *node);
@@ -283,8 +273,8 @@ pathman_transform_query_walker(Node *node, void *context)
 			default:
 				break;
 		}
-		next_context.parent_sublink	= NULL;
-		next_context.parent_cte	= NULL;
+		next_context.parent_sublink = NULL;
+		next_context.parent_cte     = NULL;
 
 		/* Assign Query a 'queryId' */
 		assign_query_id(query);
@@ -350,7 +340,7 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context)
 		/* Table may be partitioned */
 		if (rte->inh)
 		{
-			const PartRelationInfo *prel;
+			PartRelationInfo *prel;
 
 #ifdef LEGACY_ROWMARKS_95
 			/* Don't process queries with RowMarks on 9.5 */
@@ -361,15 +351,15 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context)
 			/* Proceed if table is partitioned by pg_pathman */
 			if ((prel = get_pathman_relation_info(rte->relid)) != NULL)
 			{
-				/*
-				 * HACK: unset the 'inh' flag to disable standard
-				 * planning. We'll set it again later.
-				 */
+				/* HACK: unset the 'inh' flag to disable standard planning */
 				rte->inh = false;
 
 				/* Try marking it using PARENTHOOD_ALLOWED */
 				assign_rel_parenthood_status(parse->queryId, rte,
 											 PARENTHOOD_ALLOWED);
+
+				/* Don't forget to close 'prel'! */
+				close_pathman_relation_info(prel);
 			}
 		}
 		/* Else try marking it using PARENTHOOD_DISALLOWED */
@@ -382,15 +372,11 @@ handle_modification_query(Query *parse, transform_query_cxt *context)
 {
-	RangeTblEntry  *rte;
-	Expr		   *quals;
-	Index			result_rel;
-	Oid				child;
-	FindPartitionResult fp_result;
-	ParamListInfo	params;
-
-	/* Fetch index of result relation */
-	result_rel = parse->resultRelation;
+	RangeTblEntry  *rte;
+	Expr		   *quals;
+	Oid				child;
+	Index			result_rel = parse->resultRelation;
+	ParamListInfo	params = context->query_params;
 
 	/* Exit if it's not a DELETE or UPDATE query */
 	if (result_rel == 0 || (parse->commandType != CMD_UPDATE &&
@@ -400,24 +386,20 @@ handle_modification_query(Query *parse, transform_query_cxt *context)
 
 	rte = rt_fetch(result_rel, parse->rtable);
 
 	/* Exit if it's DELETE FROM ONLY table */
-	if (!rte->inh) return;
+	if (!rte->inh)
+		return;
 
 	quals = (Expr *) eval_const_expressions(NULL, parse->jointree->quals);
 
-	params = context->query_params;
-
 	/* Check if we can replace PARAMs with CONSTs */
 	if (params && clause_contains_params((Node *) quals))
 		quals = (Expr *) eval_extern_params_mutator((Node *) quals, params);
 
-	/* Parse syntax tree and extract deepest partition */
-	fp_result = find_deepest_partition(rte->relid, result_rel, quals, &child);
+	/* Parse syntax tree and extract deepest partition if possible */
+	child = find_deepest_partition(rte->relid, result_rel, quals);
 
-	/*
-	 * If only one partition is affected,
-	 * substitute parent table with partition.
-	 */
-	if (fp_result == FP_FOUND)
+	/* Substitute parent table with partition */
+	if (OidIsValid(child))
 	{
 		Relation	child_rel,
 					parent_rel;
@@ -450,7 +432,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context)
 		}
 
 		/* Both tables are already locked */
-		child_rel = heap_open(child, NoLock);
+		child_rel  = heap_open(child, NoLock);
 		parent_rel = heap_open(parent, NoLock);
 
 		/* Build a conversion map (may be trivial, i.e. NULL) */
@@ -459,15 +441,15 @@ handle_modification_query(Query *parse, transform_query_cxt *context)
 			free_conversion_map((TupleConversionMap *) tuple_map);
 
 		/* Close relations (should remain locked, though) */
-		heap_close(child_rel, NoLock);
+		heap_close(child_rel,  NoLock);
 		heap_close(parent_rel, NoLock);
 
-		/* Exit if tuple map was NOT trivial */
-		if (tuple_map) /* just checking the pointer! */
+		/* Exit if tuple map WAS NOT trivial */
+		if (tuple_map)
 			return;
 
 		/* Update RTE's relid and relkind (for FDW) */
-		rte->relid = child;
+		rte->relid   = child;
 		rte->relkind = child_relkind;
 
 		/* HACK: unset the 'inh' flag (no children) */
@@ -521,12 +503,11 @@ partition_filter_visitor(Plan *plan, void *context)
 	lc3 = list_head(modify_table->returningLists);
 	forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations)
 	{
-		Index	rindex = lfirst_int(lc2);
-		Oid		relid = getrelid(rindex, rtable);
-		const PartRelationInfo *prel = get_pathman_relation_info(relid);
+		Index	rindex = lfirst_int(lc2);
+		Oid		relid = getrelid(rindex, rtable);
 
 		/* Check that table is partitioned */
-		if (prel)
+		if (has_pathman_relation_info(relid))
 		{
 			List *returning_list = NIL;
 
@@ -578,19 +559,16 @@ partition_router_visitor(Plan *plan, void *context)
 	lc3 = list_head(modify_table->returningLists);
 	forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations)
 	{
-		Index	rindex = lfirst_int(lc2);
-		Oid		tmp_relid,
-				relid = getrelid(rindex, rtable);
-		const PartRelationInfo *prel;
+		Index	rindex = lfirst_int(lc2);
+		Oid		relid = getrelid(rindex, rtable),
+				tmp_relid;
 
 		/* Find topmost parent */
 		while (OidIsValid(tmp_relid = get_parent_of_partition(relid)))
 			relid = tmp_relid;
 
 		/* Check that table is partitioned */
-		prel = get_pathman_relation_info(relid);
-
-		if (prel)
+		if (has_pathman_relation_info(relid))
 		{
 			List *returning_list = NIL;
 
@@ -694,75 +672,69 @@ modifytable_contains_fdw(List *rtable, ModifyTable *node)
 }
 
 /*
- * Find a single deepest subpartition.
- * Return InvalidOid if that's impossible.
+ * Find a single deepest subpartition using quals.
+ * Return InvalidOid if it's not possible.
  */
-static FindPartitionResult
-find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition)
+static Oid
+find_deepest_partition(Oid relid, Index rti, Expr *quals)
 {
-	const PartRelationInfo *prel;
-	Node				   *prel_expr;
-	WalkerContext			context;
-	List				   *ranges;
-	WrapperNode			   *wrap;
-
-	prel = get_pathman_relation_info(relid);
-
-	/* Exit if it's not partitioned */
-	if (!prel)
-		return FP_PLAIN_TABLE;
-
-	/* Exit if we must include parent */
-	if (prel->enable_parent)
-		return FP_NON_SINGULAR_RESULT;
+	PartRelationInfo   *prel;
+	Oid					result = InvalidOid;
 
 	/* Exit if there's no quals (no use) */
 	if (!quals)
-		return FP_NON_SINGULAR_RESULT;
+		return result;
 
-	/* Prepare partitioning expression */
-	prel_expr = PrelExpressionForRelid(prel, idx);
+	/* Try pruning if table is partitioned */
+	if ((prel = get_pathman_relation_info(relid)) != NULL)
+	{
+		Node		   *prel_expr;
+		WalkerContext	context;
+		List		   *ranges;
+		WrapperNode	   *wrap;
 
-	ranges = list_make1_irange_full(prel, IR_COMPLETE);
+		/* Prepare partitioning expression */
+		prel_expr = PrelExpressionForRelid(prel, rti);
 
-	/* Parse syntax tree and extract partition ranges */
-	InitWalkerContext(&context, prel_expr, prel, NULL);
-	wrap = walk_expr_tree(quals, &context);
-	ranges = irange_list_intersection(ranges, wrap->rangeset);
+		/* First we select all available partitions... */
+		ranges = list_make1_irange_full(prel, IR_COMPLETE);
 
-	if (irange_list_length(ranges) == 1)
-	{
-		IndexRange irange = linitial_irange(ranges);
+		/* Parse syntax tree and extract partition ranges */
+		InitWalkerContext(&context, prel_expr, prel, NULL);
+		wrap = walk_expr_tree(quals, &context);
+		ranges = irange_list_intersection(ranges, wrap->rangeset);
 
-		if (irange_lower(irange) == irange_upper(irange))
+		switch (irange_list_length(ranges))
 		{
-			Oid		   *children = PrelGetChildrenArray(prel),
-						child = children[irange_lower(irange)],
-						subpartition;
-			FindPartitionResult result;
-
-			/* Try to go deeper and see if there is subpartition */
-			result = find_deepest_partition(child,
-											idx,
-											quals,
-											&subpartition);
-			switch(result)
-			{
-				case FP_FOUND:
-					*partition = subpartition;
-					return FP_FOUND;
+			/* Scan only parent (don't do constraint elimination) */
+			case 0:
+				result = relid;
+				break;
 
-				case FP_PLAIN_TABLE:
-					*partition = child;
-					return FP_FOUND;
+			/* Handle the remaining partition */
+			case 1:
+				if (!prel->enable_parent)
+				{
+					IndexRange	irange = linitial_irange(ranges);
+					Oid		   *children = PrelGetChildrenArray(prel),
+								child = children[irange_lower(irange)];
 
-				case FP_NON_SINGULAR_RESULT:
-					return FP_NON_SINGULAR_RESULT;
-			}
+					/* Try to go deeper and see if there are subpartitions */
+					result = find_deepest_partition(child, rti, quals);
+				}
+				break;
+
+			default:
+				break;
 		}
+
+		/* Don't forget to close 'prel'! */
+		close_pathman_relation_info(prel);
 	}
+	/* Otherwise, return this table */
+	else result = relid;
 
-	return FP_NON_SINGULAR_RESULT;
+	return result;
 }
 
 /* Replace extern param nodes with consts */
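
Starting with this patch, get_pathman_relation_info() behaves as a reference-counted "open" that must be paired with close_pathman_relation_info(). A hedged sketch of the calling convention the series converges on (error paths are assumed to be cleaned up by transaction abort):

    PartRelationInfo *prel;

    if ((prel = get_pathman_relation_info(relid)) != NULL)
    {
        /* ... inspect prel->parttype, PrelChildrenCount(prel), etc ... */

        /* Every successful open must be paired with a close */
        close_pathman_relation_info(prel);
    }
    else
    {
        /* relation is not partitioned by pg_pathman */
    }
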
From d383d71dbb551b892084f095351daf37098841a6 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Fri, 17 Nov 2017 18:07:40 +0300
Subject: [PATCH 195/528] WIP place some more close_pathman_relation_info()

---
 src/hooks.c                     | 398 ++++++++++++++++----------------
 src/include/relation_info.h     |   3 -
 src/partition_creation.c        |  16 +-
 src/pathman_workers.c           |  16 +-
 src/pl_funcs.c                  |  33 +--
 src/pl_range_funcs.c            | 203 ++++++++--------
 src/planner_tree_modification.c |   7 +-
 src/relation_info.c             |  23 --
 src/utility_stmt_hooking.c      |  24 +-
 9 files changed, 363 insertions(+), 360 deletions(-)

diff --git a/src/hooks.c b/src/hooks.c
index e8a882d9..e9b894c7 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -84,7 +84,7 @@ pathman_join_pathlist_hook(PlannerInfo *root,
 	JoinCostWorkspace	workspace;
 	JoinType			saved_jointype = jointype;
 	RangeTblEntry	   *inner_rte = root->simple_rte_array[innerrel->relid];
-	const PartRelationInfo *inner_prel;
+	PartRelationInfo   *inner_prel;
 	List			   *joinclauses,
 					   *otherclauses;
 	WalkerContext		context;
@@ -109,8 +109,10 @@ pathman_join_pathlist_hook(PlannerInfo *root,
 	if (inner_rte->inh)
 		return;
 
-	/* We can't handle full or right outer joins */
-	if (jointype == JOIN_FULL || jointype == JOIN_RIGHT)
+	/* We don't support these join types (since inner will be parameterized) */
+	if (jointype == JOIN_FULL ||
+		jointype == JOIN_RIGHT ||
+		jointype == JOIN_UNIQUE_INNER)
 		return;
 
 	/* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */
@@ -157,12 +159,9 @@ pathman_join_pathlist_hook(PlannerInfo *root,
 						"of partitioned tables are not supported")));
 	}
 
-	/*
-	 * These codes are used internally in the planner, but are not supported
-	 * by the executor (nor, indeed, by most of the planner).
-	 */
+	/* Replace virtual join types with a real one */
 	if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER)
-		jointype = JOIN_INNER; /* replace with a proper value */
+		jointype = JOIN_INNER;
 
 	/* Extract join clauses which will separate partitions */
 	if (IS_OUTER_JOIN(extra->sjinfo->jointype))
@@ -222,11 +221,6 @@ pathman_join_pathlist_hook(PlannerInfo *root,
 			Assert(outer);
 		}
 
-		/* No way to do this in a parameterized inner path */
-		if (saved_jointype == JOIN_UNIQUE_INNER)
-			return;
-
-		/* Make inner path depend on outerrel's columns */
 		required_inner = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path),
 								   outerrel->relids);
 
@@ -245,10 +239,10 @@ pathman_join_pathlist_hook(PlannerInfo *root,
 										   innerrel->relid)))
 			continue;
 
+		/* Try building RuntimeAppend path, skip if it's not possible */
 		inner = create_runtimeappend_path(root, cur_inner_path, ppi, paramsel);
 		if (!inner)
-			return; /* could not build it, retreat! */
-
+			continue;
 
 		required_nestloop = calc_nestloop_required_outer_compat(outer, inner);
 
@@ -263,7 +257,7 @@ pathman_join_pathlist_hook(PlannerInfo *root,
 			((!bms_overlap(required_nestloop, extra->param_source_rels) &&
 			  !allow_star_schema_join(root, outer, inner)) ||
 			 have_dangerous_phv(root, outer->parent->relids, required_inner)))
-			return;
+			continue;
 
 		initial_cost_nestloop_compat(root, &workspace, jointype, outer, inner, extra);
 
@@ -299,6 +293,9 @@ pathman_join_pathlist_hook(PlannerInfo *root,
 		/* Finally we can add the new NestLoop path */
 		add_path(joinrel, (Path *) nest_path);
 	}
+
+	/* Don't forget to close 'inner_prel'! */
+	close_pathman_relation_info(inner_prel);
 }
 
 /* Cope with simple relations */
@@ -308,8 +305,21 @@ pathman_rel_pathlist_hook(PlannerInfo *root,
 						  Index rti,
 						  RangeTblEntry *rte)
 {
-	const PartRelationInfo *prel;
-	int						irange_len;
+	PartRelationInfo *prel;
+	Relation		parent_rel;			/* parent's relation (heap) */
+	PlanRowMark	   *parent_rowmark;		/* parent's rowmark */
+	Oid			   *children;			/* selected children oids */
+	List		   *ranges,				/* a list of IndexRanges */
+				   *wrappers;			/* a list of WrapperNodes */
+	PathKey		   *pathkeyAsc = NULL,
+				   *pathkeyDesc = NULL;
+	double			paramsel = 1.0;		/* default part selectivity */
+	WalkerContext	context;
+	Node		   *part_expr;
+	List		   *part_clauses;
+	ListCell	   *lc;
+	int				irange_len,
+					i;
 
 	/* Invoke original hook if needed */
 	if (set_rel_pathlist_hook_next != NULL)
@@ -344,231 +354,221 @@ pathman_rel_pathlist_hook(PlannerInfo *root,
 		return;
 
 	/* Proceed iff relation 'rel' is partitioned */
-	if ((prel = get_pathman_relation_info(rte->relid)) != NULL)
-	{
-		Relation		parent_rel;			/* parent's relation (heap) */
-		PlanRowMark	   *parent_rowmark;		/* parent's rowmark */
-		Oid			   *children;			/* selected children oids */
-		List		   *ranges,				/* a list of IndexRanges */
-					   *wrappers;			/* a list of WrapperNodes */
-		PathKey		   *pathkeyAsc = NULL,
-					   *pathkeyDesc = NULL;
-		double			paramsel = 1.0;		/* default part selectivity */
-		WalkerContext	context;
-		Node		   *part_expr;
-		List		   *part_clauses;
-		ListCell	   *lc;
-		int				i;
+	if ((prel = get_pathman_relation_info(rte->relid)) == NULL)
+		return;
 
-		/*
-		 * Check that this child is not the parent table itself.
-		 * This is exactly how standard inheritance works.
-		 *
-		 * Helps with queries like this one:
-		 *
-		 *		UPDATE test.tmp t SET value = 2
-		 *		WHERE t.id IN (SELECT id
-		 *					   FROM test.tmp2 t2
-		 *					   WHERE id = t.id);
-		 *
-		 * Since we disable optimizations on 9.5, we
-		 * have to skip parent table that has already
-		 * been expanded by standard inheritance.
-		 */
-		if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
+	/*
+	 * Check that this child is not the parent table itself.
+	 * This is exactly how standard inheritance works.
+	 *
+	 * Helps with queries like this one:
+	 *
+	 *		UPDATE test.tmp t SET value = 2
+	 *		WHERE t.id IN (SELECT id
+	 *					   FROM test.tmp2 t2
+	 *					   WHERE id = t.id);
+	 *
+	 * Since we disable optimizations on 9.5, we
+	 * have to skip parent table that has already
+	 * been expanded by standard inheritance.
+	 */
+	if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
+	{
+		foreach (lc, root->append_rel_list)
 		{
-			foreach (lc, root->append_rel_list)
-			{
-				AppendRelInfo  *appinfo = (AppendRelInfo *) lfirst(lc);
-				RangeTblEntry  *cur_parent_rte,
-							   *cur_child_rte;
+			AppendRelInfo  *appinfo = (AppendRelInfo *) lfirst(lc);
+			RangeTblEntry  *cur_parent_rte,
+						   *cur_child_rte;
 
-				/* This 'appinfo' is not for this child */
-				if (appinfo->child_relid != rti)
-					continue;
+			/* This 'appinfo' is not for this child */
+			if (appinfo->child_relid != rti)
+				continue;
 
-				cur_parent_rte = root->simple_rte_array[appinfo->parent_relid];
-				cur_child_rte  = rte; /* we already have it, saves time */
+			cur_parent_rte = root->simple_rte_array[appinfo->parent_relid];
+			cur_child_rte  = rte; /* we already have it, saves time */
 
-				/* This child == its own parent table! */
-				if (cur_parent_rte->relid == cur_child_rte->relid)
-					return;
-			}
+			/* This child == its own parent table! */
+			if (cur_parent_rte->relid == cur_child_rte->relid)
+				goto cleanup;
 		}
+	}
 
-		/* Make copy of partitioning expression and fix Var's varno attributes */
-		part_expr = PrelExpressionForRelid(prel, rti);
+	/* Make copy of partitioning expression and fix Var's varno attributes */
+	part_expr = PrelExpressionForRelid(prel, rti);
 
-		/* Get partitioning-related clauses (do this before append_child_relation()) */
-		part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti);
+	/* Get partitioning-related clauses (do this before append_child_relation()) */
+	part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti);
 
-		if (prel->parttype == PT_RANGE)
-		{
-			/*
-			 * Get pathkeys for ascending and descending sort by partitioned column.
-			 */
-			List		   *pathkeys;
-			TypeCacheEntry *tce;
-
-			/* Determine operator type */
-			tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
-
-			/* Make pathkeys */
-			pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL,
-												tce->lt_opr, NULL, false);
-			if (pathkeys)
-				pathkeyAsc = (PathKey *) linitial(pathkeys);
-			pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL,
-												tce->gt_opr, NULL, false);
-			if (pathkeys)
-				pathkeyDesc = (PathKey *) linitial(pathkeys);
-		}
+	if (prel->parttype == PT_RANGE)
+	{
+		/*
+		 * Get pathkeys for ascending and descending sort by partitioned column.
+		 */
+		List		   *pathkeys;
+		TypeCacheEntry *tce;
+
+		/* Determine operator type */
+		tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+
+		/* Make pathkeys */
+		pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL,
+											tce->lt_opr, NULL, false);
+		if (pathkeys)
+			pathkeyAsc = (PathKey *) linitial(pathkeys);
+		pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL,
+											tce->gt_opr, NULL, false);
+		if (pathkeys)
+			pathkeyDesc = (PathKey *) linitial(pathkeys);
+	}
 
-		/* mark as partitioned table */
-		MarkPartitionedRTE(rti);
+	/* mark as partitioned table */
+	MarkPartitionedRTE(rti);
 
-		children = PrelGetChildrenArray(prel);
-		ranges = list_make1_irange_full(prel, IR_COMPLETE);
+	children = PrelGetChildrenArray(prel);
+	ranges = list_make1_irange_full(prel, IR_COMPLETE);
 
-		/* Make wrappers over restrictions and collect final rangeset */
-		InitWalkerContext(&context, part_expr, prel, NULL);
-		wrappers = NIL;
-		foreach(lc, rel->baserestrictinfo)
-		{
-			WrapperNode	   *wrap;
-			RestrictInfo   *rinfo = (RestrictInfo *) lfirst(lc);
+	/* Make wrappers over restrictions and collect final rangeset */
+	InitWalkerContext(&context, part_expr, prel, NULL);
+	wrappers = NIL;
+	foreach(lc, rel->baserestrictinfo)
+	{
+		WrapperNode	   *wrap;
+		RestrictInfo   *rinfo = (RestrictInfo *) lfirst(lc);
 
-			wrap = walk_expr_tree(rinfo->clause, &context);
+		wrap = walk_expr_tree(rinfo->clause, &context);
 
-			paramsel *= wrap->paramsel;
-			wrappers = lappend(wrappers, wrap);
-			ranges = irange_list_intersection(ranges, wrap->rangeset);
-		}
+		paramsel *= wrap->paramsel;
+		wrappers = lappend(wrappers, wrap);
+		ranges = irange_list_intersection(ranges, wrap->rangeset);
+	}
 
-		/* Get number of selected partitions */
-		irange_len = irange_list_length(ranges);
-		if (prel->enable_parent)
-			irange_len++; /* also add parent */
+	/* Get number of selected partitions */
+	irange_len = irange_list_length(ranges);
+	if (prel->enable_parent)
+		irange_len++; /* also add parent */
 
-		/* Expand simple_rte_array and simple_rel_array */
-		if (irange_len > 0)
-		{
-			int current_len	= root->simple_rel_array_size,
-				new_len		= current_len + irange_len;
+	/* Expand simple_rte_array and simple_rel_array */
+	if (irange_len > 0)
+	{
+		int current_len	= root->simple_rel_array_size,
+			new_len		= current_len + irange_len;
 
-			/* Expand simple_rel_array */
-			root->simple_rel_array = (RelOptInfo **)
-					repalloc(root->simple_rel_array,
-							 new_len * sizeof(RelOptInfo *));
+		/* Expand simple_rel_array */
+		root->simple_rel_array = (RelOptInfo **)
+				repalloc(root->simple_rel_array,
+						 new_len * sizeof(RelOptInfo *));
 
-			memset((void *) &root->simple_rel_array[current_len], 0,
-				   irange_len * sizeof(RelOptInfo *));
+		memset((void *) &root->simple_rel_array[current_len], 0,
+			   irange_len * sizeof(RelOptInfo *));
 
-			/* Expand simple_rte_array */
-			root->simple_rte_array = (RangeTblEntry **)
-					repalloc(root->simple_rte_array,
-							 new_len * sizeof(RangeTblEntry *));
+		/* Expand simple_rte_array */
+		root->simple_rte_array = (RangeTblEntry **)
+				repalloc(root->simple_rte_array,
+						 new_len * sizeof(RangeTblEntry *));
 
-			memset((void *) &root->simple_rte_array[current_len], 0,
-				   irange_len * sizeof(RangeTblEntry *));
+		memset((void *) &root->simple_rte_array[current_len], 0,
+			   irange_len * sizeof(RangeTblEntry *));
 
-			/* Don't forget to update array size! */
-			root->simple_rel_array_size = new_len;
-		}
+		/* Don't forget to update array size! */
+		root->simple_rel_array_size = new_len;
+	}
 
-		/* Parent has already been locked by rewriter */
-		parent_rel = heap_open(rte->relid, NoLock);
+	/* Parent has already been locked by rewriter */
+	parent_rel = heap_open(rte->relid, NoLock);
 
-		parent_rowmark = get_plan_rowmark(root->rowMarks, rti);
+	parent_rowmark = get_plan_rowmark(root->rowMarks, rti);
 
-		/*
-		 * WARNING: 'prel' might become invalid after append_child_relation().
-		 */
+	/*
+	 * WARNING: 'prel' might become invalid after append_child_relation().
+	 */
 
-		/* Add parent if asked to */
-		if (prel->enable_parent)
-			append_child_relation(root, parent_rel, parent_rowmark,
-								  rti, 0, rte->relid, NULL);
+	/* Add parent if asked to */
+	if (prel->enable_parent)
+		append_child_relation(root, parent_rel, parent_rowmark,
+							  rti, 0, rte->relid, NULL);
 
-		/* Iterate all indexes in rangeset and append child relations */
-		foreach(lc, ranges)
-		{
-			IndexRange irange = lfirst_irange(lc);
+	/* Iterate all indexes in rangeset and append child relations */
+	foreach(lc, ranges)
+	{
+		IndexRange irange = lfirst_irange(lc);
 
-			for (i = irange_lower(irange); i <= irange_upper(irange); i++)
-				append_child_relation(root, parent_rel, parent_rowmark,
-									  rti, i, children[i], wrappers);
-		}
+		for (i = irange_lower(irange); i <= irange_upper(irange); i++)
+			append_child_relation(root, parent_rel, parent_rowmark,
+								  rti, i, children[i], wrappers);
+	}
 
-		/* Now close parent relation */
-		heap_close(parent_rel, NoLock);
+	/* Now close parent relation */
+	heap_close(parent_rel, NoLock);
 
-		/* Clear path list and make it point to NIL */
-		list_free_deep(rel->pathlist);
-		rel->pathlist = NIL;
+	/* Clear path list and make it point to NIL */
+	list_free_deep(rel->pathlist);
+	rel->pathlist = NIL;
 
 #if PG_VERSION_NUM >= 90600
-		/* Clear old partial path list */
-		list_free(rel->partial_pathlist);
-		rel->partial_pathlist = NIL;
+	/* Clear old partial path list */
+	list_free(rel->partial_pathlist);
+	rel->partial_pathlist = NIL;
 #endif
 
-		/* Generate new paths using the rels we've just added */
-		set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc);
-		set_append_rel_size_compat(root, rel, rti);
+	/* Generate new paths using the rels we've just added */
+	set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc);
+	set_append_rel_size_compat(root, rel, rti);
 
 #if PG_VERSION_NUM >= 90600
-		/* consider gathering partial paths for the parent appendrel */
-		generate_gather_paths(root, rel);
+	/* consider gathering partial paths for the parent appendrel */
+	generate_gather_paths(root, rel);
 #endif
 
-		/* No need to go further (both nodes are disabled), return */
-		if (!(pg_pathman_enable_runtimeappend ||
-			  pg_pathman_enable_runtime_merge_append))
-			return;
+	/* Skip if both custom nodes are disabled */
+	if (!(pg_pathman_enable_runtimeappend ||
+		  pg_pathman_enable_runtime_merge_append))
+		goto cleanup;
 
-		/* Skip if there's no PARAMs in partitioning-related clauses */
-		if (!clause_contains_params((Node *) part_clauses))
-			return;
+	/* Skip if there's no PARAMs in partitioning-related clauses */
+	if (!clause_contains_params((Node *) part_clauses))
+		goto cleanup;
 
-		/* Generate Runtime[Merge]Append paths if needed */
-		foreach (lc, rel->pathlist)
+	/* Generate Runtime[Merge]Append paths if needed */
+	foreach (lc, rel->pathlist)
+	{
+		AppendPath	   *cur_path = (AppendPath *) lfirst(lc);
+		Relids			inner_required = PATH_REQ_OUTER((Path *) cur_path);
+		Path		   *inner_path = NULL;
+		ParamPathInfo  *ppi;
+
+		/* Skip if rel contains some join-related stuff or path type mismatched */
+		if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) ||
+			rel->has_eclass_joins || rel->joininfo)
 		{
-			AppendPath	   *cur_path = (AppendPath *) lfirst(lc);
-			Relids			inner_required = PATH_REQ_OUTER((Path *) cur_path);
-			Path		   *inner_path = NULL;
-			ParamPathInfo  *ppi;
-
-			/* Skip if rel contains some join-related stuff or path type mismatched */
-			if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) ||
-				rel->has_eclass_joins || rel->joininfo)
-			{
-				continue;
-			}
-
-			/* Get existing parameterization */
-			ppi = get_appendrel_parampathinfo(rel, inner_required);
+			continue;
+		}
 
-			if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend)
-				inner_path = create_runtimeappend_path(root, cur_path,
-													   ppi, paramsel);
-			else if (IsA(cur_path, MergeAppendPath) &&
-					 pg_pathman_enable_runtime_merge_append)
-			{
-				/* Check struct layout compatibility */
-				if (offsetof(AppendPath, subpaths) !=
-						offsetof(MergeAppendPath, subpaths))
-					elog(FATAL, "Struct layouts of AppendPath and "
-								"MergeAppendPath differ");
-
-				inner_path = create_runtimemergeappend_path(root, cur_path,
-															ppi, paramsel);
-			}
+		/* Get existing parameterization */
+		ppi = get_appendrel_parampathinfo(rel, inner_required);
 
-			if (inner_path)
-				add_path(rel, inner_path);
+		if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend)
+			inner_path = create_runtimeappend_path(root, cur_path,
+												   ppi, paramsel);
+		else if (IsA(cur_path, MergeAppendPath) &&
+				 pg_pathman_enable_runtime_merge_append)
+		{
+			/* Check struct layout compatibility */
+			if (offsetof(AppendPath, subpaths) !=
+					offsetof(MergeAppendPath, subpaths))
+				elog(FATAL, "Struct layouts of AppendPath and "
+							"MergeAppendPath differ");
+
+			inner_path = create_runtimemergeappend_path(root, cur_path,
+														ppi, paramsel);
 		}
+
+		if (inner_path)
+			add_path(rel, inner_path);
 	}
+
+cleanup:
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
 }
 
 /*
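
Because pathman_rel_pathlist_hook() now pins a PartRelationInfo for its whole body, the hook's early returns are funneled through a single cleanup label. A condensed sketch of the resulting control-flow shape (the step names are hypothetical stand-ins, not pg_pathman functions):

    static bool planning_step_one(void) { return true;  }   /* stand-in */
    static bool planning_step_two(void) { return false; }   /* stand-in */

    static void
    rel_pathlist_hook_shape(Oid relid)
    {
        PartRelationInfo *prel;

        if ((prel = get_pathman_relation_info(relid)) == NULL)
            return;                 /* nothing pinned yet, plain return is fine */

        if (!planning_step_one())
            goto cleanup;           /* was "return" before this patch */

        if (!planning_step_two())
            goto cleanup;

    cleanup:
        /* Don't forget to close 'prel'! */
        close_pathman_relation_info(prel);
    }
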
diff --git a/src/include/relation_info.h b/src/include/relation_info.h
index 34d88f0c..f3796d28 100644
--- a/src/include/relation_info.h
+++ b/src/include/relation_info.h
@@ -323,9 +323,6 @@ void invalidate_pathman_relation_info_cache(void);
 void close_pathman_relation_info(PartRelationInfo *prel);
 bool has_pathman_relation_info(Oid relid);
 PartRelationInfo *get_pathman_relation_info(Oid relid);
-PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid,
-													   bool unlock_if_not_found,
-													   LockAcquireResult *lock_result);
 
 void shout_if_prel_is_invalid(const Oid parent_oid,
 							  const PartRelationInfo *prel,
diff --git a/src/partition_creation.c b/src/partition_creation.c
index 05a6f508..32ad269b 100644
--- a/src/partition_creation.c
+++ b/src/partition_creation.c
@@ -334,7 +334,6 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type,
 
 	PG_TRY();
 	{
-		LockAcquireResult	lock_result; /* could we lock the parent? */
 		Datum				values[Natts_pathman_config];
 		bool				isnull[Natts_pathman_config];
 
@@ -342,18 +341,29 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type,
 		if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL))
 		{
 			PartRelationInfo   *prel;
+			LockAcquireResult	lock_result; /* could we lock the parent? */
 			Oid					base_bound_type;	/* base type of prel->ev_type */
 			Oid					base_value_type;	/* base type of value_type */
 
+			/* Prevent modifications of partitioning scheme */
+			lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false);
+
 			/* Fetch PartRelationInfo by 'relid' */
-			prel = get_pathman_relation_info_after_lock(relid, true, &lock_result);
+			prel = get_pathman_relation_info(relid);
 			shout_if_prel_is_invalid(relid, prel, PT_RANGE);
 
 			/* Fetch base types of prel->ev_type & value_type */
 			base_bound_type = getBaseType(prel->ev_type);
 			base_value_type = getBaseType(value_type);
 
-			/* Search for a suitable partition if we didn't hold it */
+			/*
+			 * Search for a suitable partition if we didn't hold it,
+			 * since somebody might have just created it for us.
+			 *
+			 * If the table is locked, it means that we've
+			 * already failed to find a suitable partition
+			 * and called this function to do the job.
+			 */
 			Assert(lock_result != LOCKACQUIRE_NOT_AVAIL);
 			if (lock_result == LOCKACQUIRE_OK)
 			{
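
create_partitions_for_value_internal() now takes the lock itself instead of delegating to the removed get_pathman_relation_info_after_lock(). The LOCKACQUIRE_OK distinction is the point: it separates "we just acquired the lock" (a concurrent backend may have created the partition while we waited) from "we already held it". A hedged sketch of that reasoning, assuming xact_lock_rel()'s third argument is a no-wait flag, as the call sites suggest:

    LockAcquireResult lock_result;

    /* Serialize changes to the partitioning scheme */
    lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false);

    Assert(lock_result != LOCKACQUIRE_NOT_AVAIL);   /* we chose to wait */

    if (lock_result == LOCKACQUIRE_OK)
    {
        /*
         * Lock was just acquired: somebody else might have created a
         * suitable partition while we waited, so search the existing
         * partitions before creating a new one.
         */
    }
    else    /* LOCKACQUIRE_ALREADY_HELD */
    {
        /*
         * We already held the lock, i.e. we've been here earlier in this
         * transaction and found nothing: go ahead and create it.
         */
    }
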
diff --git a/src/pathman_workers.c b/src/pathman_workers.c
index 2db579b3..c48451d9 100644
--- a/src/pathman_workers.c
+++ b/src/pathman_workers.c
@@ -691,6 +691,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS)
 	int			empty_slot_idx = -1,		/* do we have a slot for BGWorker? */
 				i;
 	TransactionId rel_xmin;
+	LOCKMODE	lockmode = ShareUpdateExclusiveLock;
 
 	/* Check batch_size */
 	if (batch_size < 1 || batch_size > 10000)
@@ -703,12 +704,12 @@ partition_table_concurrently(PG_FUNCTION_ARGS)
 				 errmsg("'sleep_time' should not be less than 0.5")));
 
+	/* Prevent concurrent function calls */
+	LockRelationOid(relid, lockmode);
+
 	/* Check if relation is a partitioned table */
-	shout_if_prel_is_invalid(relid,
-							 /* We also lock the parent relation */
-							 get_pathman_relation_info_after_lock(relid, true, NULL),
-							 /* Partitioning type does not matter here */
-							 PT_ANY);
+	if (!has_pathman_relation_info(relid))
+		shout_if_prel_is_invalid(relid, NULL, PT_ANY);
 
 	/* Check that partitioning operation result is visible */
 	if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin, NULL))
@@ -723,7 +724,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS)
 
 	/*
 	 * Look for an empty slot and also check that a concurrent
-	 * partitioning operation for this table hasn't been started yet
+	 * partitioning operation for this table hasn't started yet.
 	 */
 	for (i = 0; i < PART_WORKER_SLOTS; i++)
 	{
@@ -797,6 +798,9 @@ partition_table_concurrently(PG_FUNCTION_ARGS)
 		 CppAsString(stop_concurrent_part_task),
 		 get_rel_name(relid));
 
+	/* We don't need this lock anymore */
+	UnlockRelationOid(relid, lockmode);
+
 	PG_RETURN_VOID();
 }
 
diff --git a/src/pl_funcs.c b/src/pl_funcs.c
index ef2288f3..ef68c11e 100644
--- a/src/pl_funcs.c
+++ b/src/pl_funcs.c
@@ -75,22 +75,22 @@ PG_FUNCTION_INFO_V1( pathman_version );
 /* User context for function show_partition_list_internal() */
 typedef struct
 {
-	Relation				pathman_config;
-	HeapScanDesc			pathman_config_scan;
-	Snapshot				snapshot;
+	Relation			pathman_config;
+	HeapScanDesc		pathman_config_scan;
+	Snapshot			snapshot;
 
-	const PartRelationInfo *current_prel;	/* selected PartRelationInfo */
+	PartRelationInfo   *current_prel;		/* selected PartRelationInfo */
 
-	Size					child_number;	/* child we're looking at */
-	SPITupleTable		   *tuptable;		/* buffer for tuples */
+	Size				child_number;		/* child we're looking at */
+	SPITupleTable	   *tuptable;			/* buffer for tuples */
 } show_partition_list_cxt;
 
 /* User context for function show_pathman_cache_stats_internal() */
 typedef struct
 {
-	MemoryContext			pathman_contexts[PATHMAN_MCXT_COUNT];
-	HTAB				   *pathman_htables[PATHMAN_MCXT_COUNT];
-	int						current_item;
+	MemoryContext		pathman_contexts[PATHMAN_MCXT_COUNT];
+	HTAB			   *pathman_htables[PATHMAN_MCXT_COUNT];
+	int					current_item;
 } show_cache_stats_cxt;
 
 /*
@@ -362,10 +362,10 @@ show_partition_list_internal(PG_FUNCTION_ARGS)
 	/* Iterate through pathman cache */
 	for (;;)
 	{
-		const PartRelationInfo *prel;
-		HeapTuple	htup;
-		Datum		values[Natts_pathman_partition_list];
-		bool		isnull[Natts_pathman_partition_list] = { 0 };
+		HeapTuple	htup;
+		Datum		values[Natts_pathman_partition_list];
+		bool		isnull[Natts_pathman_partition_list] = { 0 };
+		PartRelationInfo *prel;
 
 		/* Fetch next PartRelationInfo if needed */
 		if (usercxt->current_prel == NULL)
@@ -401,6 +401,9 @@ show_partition_list_internal(PG_FUNCTION_ARGS)
 		/* If we've run out of partitions, switch to the next 'prel' */
 		if (usercxt->child_number >= PrelChildrenCount(prel))
 		{
+			/* Don't forget to close 'prel'! */
+			close_pathman_relation_info(prel);
+
 			usercxt->current_prel = NULL;
 			usercxt->child_number = 0;
 
@@ -787,13 +790,13 @@ add_to_pathman_config(PG_FUNCTION_ARGS)
 	{
 		pfree(children);
 
-		/* Now try to create a PartRelationInfo */
 		PG_TRY();
 		{
 			/* Some flags might change during refresh attempt */
 			save_pathman_init_state(&init_state);
 
-			get_pathman_relation_info(relid);
+			/* Now try to create a PartRelationInfo */
+			has_pathman_relation_info(relid);
 		}
 		PG_CATCH();
 		{
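
Several of the call sites above only need a yes/no answer, which is what has_pathman_relation_info() provides without handing out a reference the caller would have to close. A plausible definition in terms of the open/close pair (the real one may be cheaper; this is a sketch of the contract, not the committed code):

    bool
    has_pathman_relation_info(Oid relid)
    {
        PartRelationInfo *prel = get_pathman_relation_info(relid);

        if (prel)
        {
            /* Release the reference immediately */
            close_pathman_relation_info(prel);
            return true;
        }

        return false;
    }
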
diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c
index 007a2937..0e40dcb8 100644
--- a/src/pl_range_funcs.c
+++ b/src/pl_range_funcs.c
@@ -56,22 +56,29 @@ PG_FUNCTION_INFO_V1( drop_range_partition_expand_next );
 PG_FUNCTION_INFO_V1( validate_interval_value );
 
 
-static char *deparse_constraint(Oid relid, Node *expr);
-static ArrayType *construct_infinitable_array(Bound *elems,
-											  int nelems,
-											  Oid elmtype,
-											  int elmlen,
-											  bool elmbyval,
-											  char elmalign);
-static void check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges);
+static ArrayType *construct_bounds_array(Bound *elems,
+										 int nelems,
+										 Oid elmtype,
+										 int elmlen,
+										 bool elmbyval,
+										 char elmalign);
+
+static void check_range_adjacence(Oid cmp_proc,
+								  Oid collid,
+								  List *ranges);
+
 static void merge_range_partitions_internal(Oid parent,
 											Oid *parts,
 											uint32 nparts);
+
+static char *deparse_constraint(Oid relid, Node *expr);
+
 static void modify_range_constraint(Oid partition_relid,
 									const char *expression,
 									Oid expression_type,
 									const Bound *lower,
 									const Bound *upper);
+
 static bool interval_is_trivial(Oid atttype,
 								Datum interval,
 								Oid interval_type);
@@ -400,11 +407,12 @@ generate_range_bounds_pl(PG_FUNCTION_ARGS)
 Datum
 get_part_range_by_oid(PG_FUNCTION_ARGS)
 {
-	Oid			partition_relid,
-				parent_relid;
-	RangeEntry *ranges;
-	const PartRelationInfo *prel;
-	uint32		i;
+	Oid			partition_relid,
+				parent_relid;
+	Oid			arg_type;
+	RangeEntry *ranges;
+	PartRelationInfo *prel;
+	uint32		i;
 
 	if (!PG_ARGISNULL(0))
 	{
@@ -419,11 +427,13 @@ get_part_range_by_oid(PG_FUNCTION_ARGS)
 						errmsg("relation \"%s\" is not a partition",
 							   get_rel_name_or_relid(partition_relid))));
 
+	/* Emit an error if it is not partitioned by RANGE */
 	prel = get_pathman_relation_info(parent_relid);
 	shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE);
 
 	/* Check type of 'dummy' (for correct output) */
-	if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 1)) != getBaseType(prel->ev_type))
+	arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2);
+	if (getBaseType(arg_type) != getBaseType(prel->ev_type))
 		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						errmsg("pg_typeof(dummy) should be %s",
 							   format_type_be(getBaseType(prel->ev_type)))));
@@ -432,6 +442,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS)
 
 	/* Look for the specified partition */
 	for (i = 0; i < PrelChildrenCount(prel); i++)
+	{
 		if (ranges[i].child_oid == partition_relid)
 		{
 			ArrayType  *arr;
@@ -440,12 +451,15 @@ get_part_range_by_oid(PG_FUNCTION_ARGS)
 			elems[0] = ranges[i].min;
 			elems[1] = ranges[i].max;
 
-			arr = construct_infinitable_array(elems, 2,
-											  prel->ev_type, prel->ev_len,
-											  prel->ev_byval, prel->ev_align);
+			arr = construct_bounds_array(elems, 2,
+										 prel->ev_type,
+										 prel->ev_len,
+										 prel->ev_byval,
+										 prel->ev_align);
 
 			PG_RETURN_ARRAYTYPE_P(arr);
 		}
+	}
 
 	/* No partition found, report error */
 	ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -466,11 +480,13 @@ get_part_range_by_oid(PG_FUNCTION_ARGS)
 Datum
 get_part_range_by_idx(PG_FUNCTION_ARGS)
 {
-	Oid			parent_relid;
-	int			partition_idx = 0;
-	Bound		elems[2];
-	RangeEntry *ranges;
-	const PartRelationInfo *prel;
+	Oid			parent_relid;
+	int			partition_idx = 0;
+	Oid			arg_type;
+	Bound		elems[2];
+	RangeEntry *ranges;
+	PartRelationInfo *prel;
+	ArrayType  *arr;
 
 	if (!PG_ARGISNULL(0))
 	{
@@ -486,11 +502,13 @@ get_part_range_by_idx(PG_FUNCTION_ARGS)
 	else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("'partition_idx' should not be NULL")));
 
+	/* Emit an error if it is not partitioned by RANGE */
 	prel = get_pathman_relation_info(parent_relid);
 	shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE);
 
 	/* Check type of 'dummy' (for correct output) */
-	if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 2)) != getBaseType(prel->ev_type))
+	arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2);
+	if (getBaseType(arg_type) != getBaseType(prel->ev_type))
 		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						errmsg("pg_typeof(dummy) should be %s",
 							   format_type_be(getBaseType(prel->ev_type)))));
@@ -520,11 +538,13 @@ get_part_range_by_idx(PG_FUNCTION_ARGS)
 	elems[0] = ranges[partition_idx].min;
 	elems[1] = ranges[partition_idx].max;
 
-	PG_RETURN_ARRAYTYPE_P(construct_infinitable_array(elems, 2,
-													  prel->ev_type,
-													  prel->ev_len,
-													  prel->ev_byval,
-													  prel->ev_align));
+	arr = construct_bounds_array(elems, 2,
+								 prel->ev_type,
+								 prel->ev_len,
+								 prel->ev_byval,
+								 prel->ev_align);
+
+	PG_RETURN_ARRAYTYPE_P(arr);
 }
 
 
@@ -688,8 +708,10 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts)
 			   *first,
 			   *last;
 	FmgrInfo	cmp_proc;
+	ObjectAddresses *objects = new_object_addresses();
 	int			i;
 
+	/* Emit an error if it is not partitioned by RANGE */
 	prel = get_pathman_relation_info(parent);
 	shout_if_prel_is_invalid(parent, prel, PT_RANGE);
 
@@ -702,7 +724,8 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts)
 	/* Process partitions */
 	for (i = 0; i < nparts; i++)
 	{
-		int			j;
+		ObjectAddress object;
+		int			j;
 
 		/* Prevent modification of partitions */
 		LockRelationOid(parts[0], AccessExclusiveLock);
@@ -716,6 +739,9 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts)
 				break;
 			}
 		}
+
+		ObjectAddressSet(object, RelationRelationId, parts[i]);
+		add_exact_object_address(&object, objects);
 	}
 
 	/* Check that partitions are adjacent */
@@ -765,13 +791,8 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts)
 	SPI_finish();
 
 	/* Drop obsolete partitions */
-	for (i = 1; i < nparts; i++)
-	{
-		ObjectAddress object;
-
-		ObjectAddressSet(object, RelationRelationId, parts[i]);
-		performDeletion(&object, DROP_CASCADE, 0);
-	}
+	performMultipleDeletions(objects, DROP_CASCADE, 0);
+	free_object_addresses(objects);
 }
 
 
@@ -791,72 +812,74 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS)
 	Oid			partition = PG_GETARG_OID(0),
 				parent;
 	PartRelationInfo *prel;
+	ObjectAddress object;
+	RangeEntry *ranges;
+	int			i;
 
 	/* Lock the partition we're going to drop */
 	LockRelationOid(partition, AccessExclusiveLock);
 
 	/* Check if partition exists */
-	if (!SearchSysCacheExists1(RELOID, partition))
+	if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition)))
 		elog(ERROR, "relation %u does not exist", partition);
 
 	/* Get parent's relid */
 	parent = get_parent_of_partition(partition);
-	if (!OidIsValid(parent))
+
+	/* Prevent changes in partitioning scheme */
+	LockRelationOid(parent, ShareUpdateExclusiveLock);
+
+	/* Check if parent exists */
+	if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent)))
 		elog(ERROR, "relation \"%s\" is not a partition",
 			 get_rel_name(partition));
 
-	if ((prel = get_pathman_relation_info(parent)) != NULL)
-	{
-		ObjectAddress object;
-		RangeEntry *ranges;
-		int			i;
-
-		/* Emit an error if it is not partitioned by RANGE */
-		shout_if_prel_is_invalid(parent, prel, PT_RANGE);
+	/* Emit an error if it is not partitioned by RANGE */
+	prel = get_pathman_relation_info(parent);
+	shout_if_prel_is_invalid(parent, prel, PT_RANGE);
 
-		/* Fetch ranges array */
-		ranges = PrelGetRangesArray(prel);
+	/* Fetch ranges array */
+	ranges = PrelGetRangesArray(prel);
 
-		/* Looking for partition in child relations */
-		for (i = 0; i < PrelChildrenCount(prel); i++)
-			if (ranges[i].child_oid == partition)
-				break;
+	/* Looking for partition in child relations */
+	for (i = 0; i < PrelChildrenCount(prel); i++)
+		if (ranges[i].child_oid == partition)
+			break;
 
-		/* Should have found it */
-		Assert(i < PrelChildrenCount(prel));
+	/* Should have found it */
+	Assert(i < PrelChildrenCount(prel));
 
-		/* Expand next partition if it exists */
-		if (i < PrelChildrenCount(prel) - 1)
-		{
-			RangeEntry *cur = &ranges[i],
-					   *next = &ranges[i + 1];
-			Oid			next_partition = next->child_oid;
-			LOCKMODE	lockmode = AccessExclusiveLock;
+	/* Expand next partition if it exists */
+	if (i < PrelLastChild(prel))
+	{
+		RangeEntry *cur = &ranges[i],
+				   *next = &ranges[i + 1];
+		Oid			next_partition = next->child_oid;
+		LOCKMODE	lockmode = AccessExclusiveLock;
 
-			/* Lock next partition */
-			LockRelationOid(next_partition, lockmode);
+		/* Lock next partition */
+		LockRelationOid(next_partition, lockmode);
 
-			/* Does next partition exist? */
-			if (SearchSysCacheExists1(RELOID, next_partition))
-			{
-				/* Stretch next partition to cover range */
-				modify_range_constraint(next_partition,
-										prel->expr_cstr,
-										prel->ev_type,
-										&cur->min,
-										&next->max);
-			}
-			/* Bad luck, unlock missing partition */
-			else UnlockRelationOid(next_partition, lockmode);
+		/* Does next partition exist? */
+		if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition)))
+		{
+			/* Stretch next partition to cover range */
+			modify_range_constraint(next_partition,
+									prel->expr_cstr,
+									prel->ev_type,
+									&cur->min,
+									&next->max);
 		}
+		/* Bad luck, unlock missing partition */
+		else UnlockRelationOid(next_partition, lockmode);
+	}
 
-		/* Drop partition */
-		ObjectAddressSet(object, RelationRelationId, partition);
-		performDeletion(&object, DROP_CASCADE, 0);
+	/* Drop partition */
+	ObjectAddressSet(object, RelationRelationId, partition);
+	performDeletion(&object, DROP_CASCADE, 0);
 
-		/* Don't forget to close 'prel'! */
-		close_pathman_relation_info(prel);
-	}
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
 
 	PG_RETURN_VOID();
 }
@@ -1182,18 +1205,18 @@ deparse_constraint(Oid relid, Node *expr)
 }
 
 /*
- * Build an 1d array of Bound elements
+ * Build a 1d array of Bound elements.
  *
- * The main difference from construct_array() is that
- * it will substitute infinite values with NULLs
+ * The main difference from construct_array() is that
+ * it will substitute infinite values with NULLs.
  */
 static ArrayType *
-construct_infinitable_array(Bound *elems,
-							int nelems,
-							Oid elemtype,
-							int elemlen,
-							bool elembyval,
-							char elemalign)
+construct_bounds_array(Bound *elems,
+					   int nelems,
+					   Oid elemtype,
+					   int elemlen,
+					   bool elembyval,
+					   char elemalign)
 {
 	ArrayType  *arr;
 	Datum	   *datums;
diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index 7b465dee..cd811cdd 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -340,8 +340,6 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context)
 		/* Table may be partitioned */
 		if (rte->inh)
 		{
-			PartRelationInfo *prel;
-
 #ifdef LEGACY_ROWMARKS_95
 			/* Don't process queries with RowMarks on 9.5 */
 			if (get_parse_rowmark(parse, current_rti))
@@ -349,7 +347,7 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context)
 #endif
 
 			/* Proceed if table is partitioned by pg_pathman */
-			if ((prel = get_pathman_relation_info(rte->relid)) != NULL)
+			if (has_pathman_relation_info(rte->relid))
 			{
 				/* HACK: unset the 'inh' flag to disable standard planning */
 				rte->inh = false;
@@ -357,9 +355,6 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context)
 				/* Try marking it using PARENTHOOD_ALLOWED */
 				assign_rel_parenthood_status(parse->queryId, rte,
 											 PARENTHOOD_ALLOWED);
-
-				/* Don't forget to close 'prel'! */
-				close_pathman_relation_info(prel);
 			}
 		}
 		/* Else try marking it using PARENTHOOD_DISALLOWED */
diff --git a/src/relation_info.c b/src/relation_info.c
index 44635ebf..c2563d4e 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -289,29 +289,6 @@ get_pathman_relation_info(Oid relid)
 	return psin->prel;
 }
 
-/* Acquire lock on a table and try to get PartRelationInfo */
-PartRelationInfo *
-get_pathman_relation_info_after_lock(Oid relid,
-									 bool unlock_if_not_found,
-									 LockAcquireResult *lock_result)
-{
-	PartRelationInfo *prel;
-	LockAcquireResult acquire_result;
-
-	/* Restrict concurrent partition creation (it's dangerous) */
-	acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false);
-
-	/* Set 'lock_result' if asked to */
-	if (lock_result)
-		*lock_result = acquire_result;
-
-	prel = get_pathman_relation_info(relid);
-	if (!prel && unlock_if_not_found)
-		UnlockRelationOid(relid, ShareUpdateExclusiveLock);
-
-	return prel;
-}
-
 /* Build a new PartRelationInfo for partitioned relation */
 static PartRelationInfo *
 build_pathman_relation_info(Oid relid, Datum *values)
diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index 1f376c20..f90cca36 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -476,7 +476,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 	ResultRelInfo  *parent_rri;
 
 	EState		   *estate = CreateExecutorState(); /* for ExecConstraints() */
-	ExprContext	   *econtext;
 	TupleTableSlot *myslot;
 	MemoryContext	oldcontext = CurrentMemoryContext;
 
@@ -529,15 +528,14 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 	values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
 	nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
 
-	econtext = GetPerTupleExprContext(estate);
-
 	for (;;)
 	{
 		TupleTableSlot *slot;
 		bool			skip_tuple;
 		Oid				tuple_oid = InvalidOid;
+		ExprContext	   *econtext = GetPerTupleExprContext(estate);
 
-		const PartRelationInfo *prel;
+		PartRelationInfo	   *prel;
 		ResultRelInfoHolder	   *rri_holder;
 		ResultRelInfo		   *child_result_rel;
 
@@ -551,7 +549,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 		/* Initialize expression and expression state */
 		if (expr == NULL)
 		{
-			expr = copyObject(prel->expr);
+			expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO);
 			expr_state = ExecInitExpr((Expr *) expr, NULL);
 		}
 
@@ -575,10 +573,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 		/* Store slot for expression evaluation */
 		econtext->ecxt_scantuple = slot;
 
-		/*
-		 * Search for a matching partition.
-		 * WARNING: 'prel' might change after this call!
-		 */
+		/* Search for a matching partition */
 		rri_holder = select_partition_for_insert(expr_state, econtext, estate,
 												 prel, &parts_storage);
 
@@ -598,13 +593,12 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 		{
 			HeapTuple tuple_old;
 
-			/* TODO: use 'tuple_map' directly instead of do_convert_tuple() */
 			tuple_old = tuple;
 			tuple = do_convert_tuple(tuple, rri_holder->tuple_map);
 			heap_freetuple(tuple_old);
 		}
 
-		/* now we can set proper tuple descriptor according to child relation */
+		/* Now we can set proper tuple descriptor according to child relation */
 		ExecSetSlotDescriptor(slot, RelationGetDescr(child_result_rel->ri_RelationDesc));
 		ExecStoreTuple(tuple, slot, InvalidBuffer, false);
 
@@ -656,12 +650,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 		}
 	}
 
+	/* Switch back to query context */
 	MemoryContextSwitchTo(oldcontext);
 
-	/*
-	 * In the old protocol, tell pqcomm that we can process normal protocol
-	 * messages again.
-	 */
+	/* Required for old protocol */
 	if (old_protocol)
 		pq_endmsgread();
 
@@ -674,6 +666,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 	pfree(values);
 	pfree(nulls);
 
+	/* Release resources for tuple table */
 	ExecResetTupleTable(estate->es_tupleTable, false);
 
 	/* Close partitions and destroy hash table */
@@ -682,6 +675,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 	/* Close parent's indices */
 	ExecCloseIndices(parent_rri);
 
+	/* Release an EState along with all remaining working storage */
 	FreeExecutorState(estate);
 
 	return processed;

From c96393352dc7d6e110adf3dd299ddd7dfce791ed Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 20 Nov 2017 14:21:59 +0300
Subject: [PATCH 196/528] fix late updates (e.g. 1.1 => 1.4)

---
 src/pl_range_funcs.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c
index 1b8b2ade..5e3a7696 100644
--- a/src/pl_range_funcs.c
+++ b/src/pl_range_funcs.c
@@ -870,8 +870,11 @@ validate_interval_value(PG_FUNCTION_ARGS)
 	/*
 	 * Fetch partitioning expression's type using
 	 * either user's expression or parsed expression.
+	 *
+	 * NOTE: we check number of function's arguments
+	 * in case of late updates (e.g. 1.1 => 1.4).
 	 */
-	if (PG_ARGISNULL(ARG_EXPRESSION_P))
+	if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P)
 	{
 		Datum		expr_datum;
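
The guard above matters when the shared library is upgraded before ALTER EXTENSION ... UPDATE has swapped in the new SQL-level signatures: the old SQL declaration may still invoke the new C function with fewer arguments, and reading an argument past PG_NARGS() would access garbage. A minimal sketch of the idiom (function name and ARG_OPTIONAL index are hypothetical):

    #define ARG_OPTIONAL 4      /* hypothetical positional index */

    Datum
    some_validator(PG_FUNCTION_ARGS)    /* hypothetical function */
    {
        /* Treat the argument as absent unless this call actually passed it */
        if (PG_NARGS() <= ARG_OPTIONAL || PG_ARGISNULL(ARG_OPTIONAL))
        {
            /* fall back to a default */
        }
        else
        {
            Datum value = PG_GETARG_DATUM(ARG_OPTIONAL);

            /* ... validate 'value' ... */
            (void) value;
        }

        PG_RETURN_BOOL(true);
    }
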
From 81be0d8c6d5cc216e4d28ae5f851366de98610b4 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 21 Nov 2017 15:11:46 +0300
Subject: [PATCH 197/528] WIP fix more places that use cache

---
 src/hooks.c                 |   4 +-
 src/include/relation_info.h |  21 ++++++--
 src/nodes_common.c          |  36 ++++++-----
 src/pl_range_funcs.c        |  54 ++++++++++---------
 src/relation_info.c         | 105 +++++++++++++++++++++++++-----------
 5 files changed, 148 insertions(+), 72 deletions(-)

diff --git a/src/hooks.c b/src/hooks.c
index e9b894c7..83f040d8 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -821,7 +821,7 @@ pathman_relcache_hook(Datum arg, Oid relid)
 	/* Invalidation event for whole cache */
 	if (relid == InvalidOid)
 	{
-		invalidate_pathman_relation_info_cache();
+		invalidate_pathman_status_info_cache();
 	}
 
 	/* Invalidation event for PATHMAN_CONFIG table (probably DROP) */
@@ -840,7 +840,7 @@ pathman_relcache_hook(Datum arg, Oid relid)
 		forget_parent_of_partition(relid);
 
 		/* Invalidate PartStatusInfo entry if needed */
-		invalidate_pathman_relation_info(relid);
+		invalidate_pathman_status_info(relid);
 	}
 }
 
diff --git a/src/include/relation_info.h b/src/include/relation_info.h
index f3796d28..14286546 100644
--- a/src/include/relation_info.h
+++ b/src/include/relation_info.h
@@ -236,6 +236,19 @@ typedef struct PartRelationInfo
 
 #define PrelIsFresh(prel)		( (prel)->fresh )
 
+static inline uint32
+PrelHasPartition(const PartRelationInfo *prel, Oid partition_relid)
+{
+	Oid	   *children = PrelGetChildrenArray(prel);
+	uint32	i;
+
+	for (i = 0; i < PrelChildrenCount(prel); i++)
+		if (children[i] == partition_relid)
+			return i + 1;
+
+	return 0;
+}
+
 static inline uint32
 PrelLastChild(const PartRelationInfo *prel)
 {
@@ -316,10 +329,12 @@ PartTypeToCString(PartType parttype)
 }
 
 
+/* Status cache */
+void invalidate_pathman_status_info(Oid relid);
+void invalidate_pathman_status_info_cache(void);
+
 /* Dispatch cache */
 void refresh_pathman_relation_info(Oid relid);
-void invalidate_pathman_relation_info(Oid relid);
-void invalidate_pathman_relation_info_cache(void);
 void close_pathman_relation_info(PartRelationInfo *prel);
 bool has_pathman_relation_info(Oid relid);
 PartRelationInfo *get_pathman_relation_info(Oid relid);
@@ -332,7 +347,7 @@ void shout_if_prel_is_invalid(const Oid parent_oid,
 void forget_bounds_of_partition(Oid partition);
 PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel);
 
-/* Parent cache */
+/* Parents cache */
 void cache_parent_of_partition(Oid partition, Oid parent);
 void forget_parent_of_partition(Oid partition);
 Oid get_parent_of_partition(Oid partition);
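
Note PrelHasPartition()'s convention: it returns 0 when the OID is not a child of this parent, and index + 1 otherwise, so callers must subtract one before indexing. Usage in the style of the get_part_range_by_oid() rewrite later in this patch:

    uint32 idx;

    if ((idx = PrelHasPartition(prel, partition_relid)) > 0)
    {
        RangeEntry *ranges = PrelGetRangesArray(prel);
        RangeEntry *re = &ranges[idx - 1];      /* convert to 0-based */

        /* ... use re->min and re->max ... */
    }
    else
    {
        /* partition_relid is not a child of this parent */
    }
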
diff --git a/src/nodes_common.c b/src/nodes_common.c
index 7a4b71fe..66f2df12 100644
--- a/src/nodes_common.c
+++ b/src/nodes_common.c
@@ -558,9 +558,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel,
 						  List *clauses, List *custom_plans,
 						  CustomScanMethods *scan_methods)
 {
-	RuntimeAppendPath	   *rpath = (RuntimeAppendPath *) best_path;
-	const PartRelationInfo *prel;
-	CustomScan			   *cscan;
+	RuntimeAppendPath  *rpath = (RuntimeAppendPath *) best_path;
+	PartRelationInfo   *prel;
+	CustomScan		   *cscan;
 
 	prel = get_pathman_relation_info(rpath->relid);
 	Assert(prel);
@@ -630,6 +630,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel,
 	/* Cache 'prel->enable_parent' as well */
 	pack_runtimeappend_private(cscan, rpath, prel->enable_parent);
 
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
+
 	return &cscan->scan.plan;
 }
 
@@ -659,14 +662,15 @@ create_append_scan_state_common(CustomScan *node,
 void
 begin_append_common(CustomScanState *node, EState *estate, int eflags)
 {
-	RuntimeAppendState		   *scan_state = (RuntimeAppendState *) node;
-	const PartRelationInfo	   *prel;
+	RuntimeAppendState *scan_state = (RuntimeAppendState *) node;
+	PartRelationInfo   *prel;
 
 #if PG_VERSION_NUM < 100000
 	node->ss.ps.ps_TupFromTlist = false;
 #endif
 
 	prel = get_pathman_relation_info(scan_state->relid);
+	Assert(prel);
 
 	/* Prepare expression according to set_set_customscan_references() */
 	scan_state->prel_expr = PrelExpressionForRelid(prel, INDEX_VAR);
@@ -674,6 +678,9 @@ begin_append_common(CustomScanState *node, EState *estate, int eflags)
 	/* Prepare custom expression according to set_set_customscan_references() */
 	scan_state->canon_custom_exprs =
 			canonicalize_custom_exprs(scan_state->custom_exprs);
+
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
 }
 
 TupleTableSlot *
@@ -754,14 +761,14 @@ end_append_common(CustomScanState *node)
 void
 rescan_append_common(CustomScanState *node)
 {
-	RuntimeAppendState	   *scan_state = (RuntimeAppendState *) node;
-	ExprContext			   *econtext = node->ss.ps.ps_ExprContext;
-	const PartRelationInfo *prel;
-	List				   *ranges;
-	ListCell			   *lc;
-	WalkerContext			wcxt;
-	Oid					   *parts;
-	int						nparts;
+	RuntimeAppendState *scan_state = (RuntimeAppendState *) node;
+	ExprContext		   *econtext = node->ss.ps.ps_ExprContext;
+	PartRelationInfo   *prel;
+	List			   *ranges;
+	ListCell		   *lc;
+	WalkerContext		wcxt;
+	Oid				   *parts;
+	int					nparts;
 
 	prel = get_pathman_relation_info(scan_state->relid);
 	Assert(prel);
@@ -797,6 +804,9 @@ rescan_append_common(CustomScanState *node)
 								 scan_state->ncur_plans,
 								 scan_state->css.ss.ps.state);
 
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
+
 	scan_state->running_idx = 0;
 }
 
diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c
index 0e40dcb8..997547f2 100644
--- a/src/pl_range_funcs.c
+++ b/src/pl_range_funcs.c
@@ -412,7 +412,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS)
 	Oid			arg_type;
 	RangeEntry *ranges;
 	PartRelationInfo *prel;
-	uint32		i;
+	uint32		idx;
 
 	if (!PG_ARGISNULL(0))
 	{
@@ -441,24 +441,24 @@ get_part_range_by_oid(PG_FUNCTION_ARGS)
 	ranges = PrelGetRangesArray(prel);
 
 	/* Look for the specified partition */
-	for (i = 0; i < PrelChildrenCount(prel); i++)
+	if ((idx = PrelHasPartition(prel, partition_relid)) > 0)
 	{
-		if (ranges[i].child_oid == partition_relid)
-		{
-			ArrayType  *arr;
-			Bound		elems[2];
+		ArrayType  *arr;
+		Bound		elems[2];
 
-			elems[0] = ranges[i].min;
-			elems[1] = ranges[i].max;
+		elems[0] = ranges[idx - 1].min;
+		elems[1] = ranges[idx - 1].max;
 
-			arr = construct_bounds_array(elems, 2,
-										 prel->ev_type,
-										 prel->ev_len,
-										 prel->ev_byval,
-										 prel->ev_align);
+		arr = construct_bounds_array(elems, 2,
+									 prel->ev_type,
+									 prel->ev_len,
+									 prel->ev_byval,
+									 prel->ev_align);
 
-			PG_RETURN_ARRAYTYPE_P(arr);
-		}
+		/* Don't forget to close 'prel'! */
+		close_pathman_relation_info(prel);
+
+		PG_RETURN_ARRAYTYPE_P(arr);
 	}
 
 	/* No partition found, report error */
@@ -544,6 +544,9 @@ get_part_range_by_idx(PG_FUNCTION_ARGS)
 								 prel->ev_byval,
 								 prel->ev_align);
 
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
+
 	PG_RETURN_ARRAYTYPE_P(arr);
 }
 
@@ -702,14 +705,14 @@ merge_range_partitions(PG_FUNCTION_ARGS)
 static void
 merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts)
 {
-	const PartRelationInfo *prel;
-	List		   *rentry_list = NIL;
-	RangeEntry	   *ranges,
-				   *first,
-				   *last;
-	FmgrInfo		cmp_proc;
-	ObjectAddresses *objects = new_object_addresses();
-	int				i;
+	PartRelationInfo *prel;
+	List			 *rentry_list = NIL;
+	RangeEntry		 *ranges,
+					 *first,
+					 *last;
+	FmgrInfo		  cmp_proc;
+	ObjectAddresses	 *objects = new_object_addresses();
+	int				  i;
 
 	/* Emit an error if it is not partitioned by RANGE */
 	prel = get_pathman_relation_info(parent);
@@ -749,7 +752,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts)
 
 	/* First determine the bounds of a new constraint */
 	first = (RangeEntry *) linitial(rentry_list);
-	last = (RangeEntry *) llast(rentry_list);
+	last  = (RangeEntry *) llast(rentry_list);
 
 	/* Swap ranges if 'last' < 'first' */
 	fmgr_info(prel->cmp_proc, &cmp_proc);
@@ -793,6 +796,9 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts)
 	/* Drop obsolete partitions */
 	performMultipleDeletions(objects, DROP_CASCADE, 0);
 	free_object_addresses(objects);
+
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
 }
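
merge_range_partitions_internal() collects every partition it is about to drop into one ObjectAddresses set and deletes them with a single call, which lets dependency.c resolve dependencies among the victims themselves. A sketch of the batch-drop idiom (the 'relids'/'nrels' inputs are hypothetical):

    ObjectAddresses *objects = new_object_addresses();
    ObjectAddress	 object;
    int				 i;

    for (i = 0; i < nrels; i++)
    {
        ObjectAddressSet(object, RelationRelationId, relids[i]);
        add_exact_object_address(&object, objects);
    }

    /* One call instead of N; handles inter-object dependencies */
    performMultipleDeletions(objects, DROP_CASCADE, 0);
    free_object_addresses(objects);
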
+ */ + ppar = pathman_cache_search_relid(parents_cache, + relid, HASH_FIND, + NULL); + + /* Invalidate parent directly */ + if (ppar) + { + /* Find status cache entry for parent */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin) + invalidate_psin_entry(psin); } + /* Otherwise, look through all entries */ + else invalidate_psin_entries_using_relid(relid); } -/* TODO: comment */ +/* Invalidate all PartStatusInfo entries */ void -invalidate_pathman_relation_info_cache(void) +invalidate_pathman_status_info_cache(void) +{ + invalidate_psin_entries_using_relid(InvalidOid); +} + +/* Invalidate PartStatusInfo entry referencing 'relid' */ +static void +invalidate_psin_entries_using_relid(Oid relid) { HASH_SEQ_STATUS status; PartStatusInfo *psin; + hash_seq_init(&status, status_cache); + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) { -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, "invalidation message for relation %u [%u]", - psin->relid, MyProcPid); -#endif + if (relid == InvalidOid || + psin->relid == relid || + (psin->prel && PrelHasPartition(psin->prel, relid))) + { + /* Perform invalidation */ + invalidate_psin_entry(psin); - invalidate_pathman_status_info(psin); + /* Exit if found */ + if (OidIsValid(relid)) + { + hash_seq_term(&status); + break; + } + } } } -/* TODO: comment */ +/* Invalidate single PartStatusInfo entry */ static void -invalidate_pathman_status_info(PartStatusInfo *psin) +invalidate_psin_entry(PartStatusInfo *psin) { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif + /* Mark entry as invalid */ if (psin->prel && PrelReferenceCount(psin->prel) > 0) { @@ -189,7 +222,19 @@ invalidate_pathman_status_info(PartStatusInfo *psin) } } -/* TODO: comment */ + +/* + * Dispatch cache routines. + */ + +/* Make changes to PartRelationInfo visible */ +void +refresh_pathman_relation_info(Oid relid) +{ + +} + +/* Close PartRelationInfo entry */ void close_pathman_relation_info(PartRelationInfo *prel) { @@ -680,7 +725,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, /* - * Partition bounds cache routines. + * Bounds cache routines. */ /* Remove partition's constraint from cache */ @@ -904,7 +949,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, /* - * Partition parents cache routines. + * Parents cache routines. 
*/ /* Add parent of partition to cache */ From c8690fc1dab69830caa2834762a448aa8977ed42 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 21 Nov 2017 15:36:54 +0300 Subject: [PATCH 198/528] WIP refactoring & fixes in PathmanCopyFrom() --- src/utility_stmt_hooking.c | 69 ++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 32 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f90cca36..d52dd330 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -474,13 +474,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ResultPartsStorage parts_storage; ResultRelInfo *parent_rri; + ExprState *expr_state = NULL; + MemoryContext query_mcxt = CurrentMemoryContext; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ TupleTableSlot *myslot; - MemoryContext oldcontext = CurrentMemoryContext; - - Node *expr = NULL; - ExprState *expr_state = NULL; uint64 processed = 0; @@ -531,28 +529,18 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, for (;;) { TupleTableSlot *slot; - bool skip_tuple; + bool skip_tuple = false; Oid tuple_oid = InvalidOid; ExprContext *econtext = GetPerTupleExprContext(estate); PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - ResultRelInfo *child_result_rel; + ResultRelInfo *child_rri; CHECK_FOR_INTERRUPTS(); ResetPerTupleExprContext(estate); - /* Fetch PartRelationInfo for parent relation */ - prel = get_pathman_relation_info(RelationGetRelid(parent_rel)); - - /* Initialize expression and expression state */ - if (expr == NULL) - { - expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); - expr_state = ExecInitExpr((Expr *) expr, NULL); - } - /* Switch into per tuple memory context */ MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -573,20 +561,39 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Store slot for expression evaluation */ econtext->ecxt_scantuple = slot; + /* Fetch PartRelationInfo for parent relation */ + prel = get_pathman_relation_info(RelationGetRelid(parent_rel)); + + /* Initialize expression state */ + if (expr_state == NULL) + { + MemoryContext old_mcxt; + Node *expr; + + old_mcxt = MemoryContextSwitchTo(query_mcxt); + + expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); + expr_state = ExecInitExpr((Expr *) expr, NULL); + + MemoryContextSwitchTo(old_mcxt); + } + /* Search for a matching partition */ rri_holder = select_partition_for_insert(expr_state, econtext, estate, prel, &parts_storage); + child_rri = rri_holder->result_rel_info; - child_result_rel = rri_holder->result_rel_info; + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = child_result_rel; + estate->es_result_relation_info = child_rri; /* * Constraints might reference the tableoid column, so initialize * t_tableOid before evaluating them. 
*/ - tuple->t_tableOid = RelationGetRelid(child_result_rel->ri_RelationDesc); + tuple->t_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) @@ -599,19 +606,17 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, } /* Now we can set proper tuple descriptor according to child relation */ - ExecSetSlotDescriptor(slot, RelationGetDescr(child_result_rel->ri_RelationDesc)); + ExecSetSlotDescriptor(slot, RelationGetDescr(child_rri->ri_RelationDesc)); ExecStoreTuple(tuple, slot, InvalidBuffer, false); /* Triggers and stuff need to be invoked in query context. */ - MemoryContextSwitchTo(oldcontext); - - skip_tuple = false; + MemoryContextSwitchTo(query_mcxt); /* BEFORE ROW INSERT Triggers */ - if (child_result_rel->ri_TrigDesc && - child_result_rel->ri_TrigDesc->trig_insert_before_row) + if (child_rri->ri_TrigDesc && + child_rri->ri_TrigDesc->trig_insert_before_row) { - slot = ExecBRInsertTriggers(estate, child_result_rel, slot); + slot = ExecBRInsertTriggers(estate, child_rri, slot); if (slot == NULL) /* "do nothing" */ skip_tuple = true; @@ -625,18 +630,18 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, List *recheckIndexes = NIL; /* Check the constraints of the tuple */ - if (child_result_rel->ri_RelationDesc->rd_att->constr) - ExecConstraints(child_result_rel, slot, estate); + if (child_rri->ri_RelationDesc->rd_att->constr) + ExecConstraints(child_rri, slot, estate); /* OK, store the tuple and create index entries for it */ - simple_heap_insert(child_result_rel->ri_RelationDesc, tuple); + simple_heap_insert(child_rri->ri_RelationDesc, tuple); - if (child_result_rel->ri_NumIndices > 0) + if (child_rri->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, NIL); /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ - ExecARInsertTriggersCompat(estate, child_result_rel, tuple, + ExecARInsertTriggersCompat(estate, child_rri, tuple, recheckIndexes, NULL); list_free(recheckIndexes); @@ -651,7 +656,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, } /* Switch back to query context */ - MemoryContextSwitchTo(oldcontext); + MemoryContextSwitchTo(query_mcxt); /* Required for old protocol */ if (old_protocol) From 1b380ff81cb995cfbc032a47f48be38818e850f2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 22 Nov 2017 20:38:58 +0300 Subject: [PATCH 199/528] small refactoring for ResultPartsStorage --- src/include/partition_filter.h | 20 ++++-- src/partition_filter.c | 123 ++++++++++++++++----------------- src/utility_stmt_hooking.c | 12 ++-- 3 files changed, 78 insertions(+), 77 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 841cd0cb..d298bb34 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -49,8 +49,9 @@ typedef struct } ResultRelInfoHolder; -/* Standard size of ResultPartsStorage entry */ -#define ResultPartsStorageStandard 0 +/* Default settings for ResultPartsStorage */ +#define RPS_DEFAULT_ENTRY_SIZE sizeof(ResultPartsStorage) +#define RPS_DEFAULT_SPECULATIVE false /* speculative inserts */ /* Forward declaration (for on_new_rri_holder()) */ struct ResultPartsStorage; @@ -137,18 +138,25 @@ extern CustomExecMethods partition_filter_exec_methods; void init_partition_filter_static_data(void); -/* ResultPartsStorage init\fini\scan function */ +/* + * ResultPartsStorage API (select partition for INSERT & UPDATE). 
+ */ + +/* Initialize storage for some parent table */ void init_result_parts_storage(ResultPartsStorage *parts_storage, + ResultRelInfo *parent_rri, EState *estate, - bool speculative_inserts, + CmdType cmd_type, Size table_entry_size, + bool speculative_inserts, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg, - CmdType cmd_type); + void *on_new_rri_holder_cb_arg); +/* Free storage and opened relations */ void fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels); +/* Find ResultRelInfo holder in storage */ ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *storage); diff --git a/src/partition_filter.c b/src/partition_filter.c index 33424e06..035db748 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -144,30 +144,30 @@ init_partition_filter_static_data(void) /* Initialize ResultPartsStorage (hash table etc) */ void init_result_parts_storage(ResultPartsStorage *parts_storage, + ResultRelInfo *parent_rri, EState *estate, - bool speculative_inserts, + CmdType cmd_type, Size table_entry_size, + bool speculative_inserts, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg, - CmdType cmd_type) + void *on_new_rri_holder_cb_arg) { HASHCTL *result_rels_table_config = &parts_storage->result_rels_table_config; memset(result_rels_table_config, 0, sizeof(HASHCTL)); result_rels_table_config->keysize = sizeof(Oid); - - /* Use sizeof(ResultRelInfoHolder) if table_entry_size is 0 */ - if (table_entry_size == ResultPartsStorageStandard) - result_rels_table_config->entrysize = sizeof(ResultRelInfoHolder); - else - result_rels_table_config->entrysize = table_entry_size; + result_rels_table_config->entrysize = table_entry_size; parts_storage->result_rels_table = hash_create("ResultRelInfo storage", 10, result_rels_table_config, HASH_ELEM | HASH_BLOBS); + Assert(parent_rri); + parts_storage->base_rri = parent_rri; + + Assert(estate); parts_storage->estate = estate; - parts_storage->base_rri = NULL; + /* Callback might be NULL */ parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; @@ -225,7 +225,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) rri_holder = hash_search(parts_storage->result_rels_table, (const void *) &partid, - HASH_ENTER, &found); + HASH_FIND, &found); /* If not found, create & cache new ResultRelInfo */ if (!found) @@ -237,6 +237,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) Index child_rte_idx; ResultRelInfo *child_result_rel_info; List *translated_vars; + MemoryContext old_mcxt; /* Check that 'base_rri' is set */ if (!parts_storage->base_rri) @@ -246,15 +247,18 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) LockRelationOid(partid, parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) { - /* Don't forget to drop invalid hash table entry */ - hash_search(parts_storage->result_rels_table, - (const void *) &partid, - HASH_REMOVE, NULL); - UnlockRelationOid(partid, parts_storage->head_open_lock_mode); return NULL; } + /* Switch to query-level mcxt for allocations */ + old_mcxt = MemoryContextSwitchTo(parts_storage->estate->es_query_cxt); + + /* Create a new cache entry for this partition */ + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_ENTER, NULL); + parent_rte = rt_fetch(parts_storage->base_rri->ri_RangeTableIndex, 
parts_storage->estate->es_range_table); @@ -300,15 +304,15 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Copy necessary fields from saved ResultRelInfo */ CopyToResultRelInfo(ri_WithCheckOptions); CopyToResultRelInfo(ri_WithCheckOptionExprs); + CopyToResultRelInfo(ri_projectReturning); + CopyToResultRelInfo(ri_onConflictSetProj); + CopyToResultRelInfo(ri_onConflictSetWhere); + if (parts_storage->command_type != CMD_UPDATE) CopyToResultRelInfo(ri_junkFilter); else child_result_rel_info->ri_junkFilter = NULL; - CopyToResultRelInfo(ri_projectReturning); - CopyToResultRelInfo(ri_onConflictSetProj); - CopyToResultRelInfo(ri_onConflictSetWhere); - /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; @@ -334,8 +338,11 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) parts_storage, parts_storage->callback_arg); - /* Finally append ResultRelInfo to storage->es_alloc_result_rels */ + /* Append ResultRelInfo to storage->es_alloc_result_rels */ append_rri_to_estate(parts_storage->estate, child_result_rel_info); + + /* Don't forget to switch back! */ + MemoryContextSwitchTo(old_mcxt); } return rri_holder; @@ -426,7 +433,6 @@ select_partition_for_insert(ExprState *expr_state, const PartRelationInfo *prel, ResultPartsStorage *parts_storage) { - MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; Oid parent_relid = PrelParentRelid(prel), partition_relid = InvalidOid; @@ -448,27 +454,22 @@ select_partition_for_insert(ExprState *expr_state, parts = find_partitions_for_value(value, prel->ev_type, prel, &nparts); if (nparts > 1) + { elog(ERROR, ERR_PART_ATTR_MULTIPLE); + } else if (nparts == 0) { partition_relid = create_partitions_for_value(parent_relid, value, prel->ev_type); - - /* get_pathman_relation_info() will refresh this entry */ - refresh_pathman_relation_info(parent_relid); } else partition_relid = parts[0]; - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + /* Get ResultRelationInfo holder for the selected partition */ rri_holder = scan_result_parts_storage(partition_relid, parts_storage); - MemoryContextSwitchTo(old_mcxt); /* This partition has been dropped, repeat with a new 'prel' */ if (rri_holder == NULL) { - /* get_pathman_relation_info() will refresh this entry */ - refresh_pathman_relation_info(parent_relid); - /* Get a fresh PartRelationInfo */ prel = get_pathman_relation_info(parent_relid); @@ -520,9 +521,9 @@ prepare_expr_state(const PartRelationInfo *prel, EState *estate, bool try_map) { - ExprState *expr_state; - MemoryContext old_mcxt; - Node *expr; + ExprState *expr_state; + MemoryContext old_mcxt; + Node *expr; /* Make sure we use query memory context */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); @@ -650,12 +651,12 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - PartitionFilterState *state = (PartitionFilterState *) node; - PlanState *child_state; - ResultRelInfo *current_rri; - Relation current_rel; - const PartRelationInfo *prel; - bool try_map; + PartitionFilterState *state = (PartitionFilterState *) node; + PlanState *child_state; + ResultRelInfo *current_rri; + Relation current_rel; + PartRelationInfo *prel; + bool try_map; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); @@ -665,9 +666,6 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) current_rri = 
estate->es_result_relation_info; current_rel = current_rri->ri_RelationDesc; - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - /* * In UPDATE queries we have to work with child relation tlist, * but expression contains varattnos of base relation, so we @@ -678,19 +676,21 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) try_map = state->command_type == CMD_UPDATE && RelationGetRelid(current_rel) != state->partitioned_table; + /* Fetch PartRelationInfo for this partitioned relation */ + prel = get_pathman_relation_info(state->partitioned_table); + /* Build a partitioning expression state */ state->expr_state = prepare_expr_state(prel, current_rel, estate, try_map); + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + /* Init ResultRelInfo cache */ - init_result_parts_storage(&state->result_parts, estate, + init_result_parts_storage(&state->result_parts, current_rri, + estate, state->command_type, + RPS_DEFAULT_ENTRY_SIZE, state->on_conflict_action != ONCONFLICT_NONE, - ResultPartsStorageStandard, - prepare_rri_for_insert, - (void *) state, - state->command_type); - - /* Don't forget to initialize 'base_rri'! */ - state->result_parts.base_rri = current_rri; + prepare_rri_for_insert, (void *) state); /* No warnings yet */ state->warning_triggered = false; @@ -714,22 +714,14 @@ partition_filter_exec(CustomScanState *node) if (!TupIsNull(slot)) { - MemoryContext old_mcxt; - const PartRelationInfo *prel; - ResultRelInfoHolder *rri_holder; - ResultRelInfo *resultRelInfo; + MemoryContext old_mcxt; + PartRelationInfo *prel; + ResultRelInfoHolder *rri_holder; + ResultRelInfo *resultRelInfo; /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - if (!prel) - { - if (!state->warning_triggered) - elog(WARNING, "table \"%s\" is not partitioned, " - INSERT_NODE_NAME " will behave as a normal INSERT", - get_rel_name_or_relid(state->partitioned_table)); - - return slot; - } + if ((prel = get_pathman_relation_info(state->partitioned_table)) == NULL) + return slot; /* table is not partitioned anymore */ /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -775,6 +767,9 @@ partition_filter_exec(CustomScanState *node) slot = state->tup_convert_slot; } + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + return slot; } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index d52dd330..96a35989 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -498,13 +498,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, estate->es_range_table = range_table; /* Initialize ResultPartsStorage */ - init_result_parts_storage(&parts_storage, estate, false, - ResultPartsStorageStandard, - prepare_rri_for_copy, NULL, - CMD_INSERT); - - /* Don't forget to initialize 'base_rri'! */ - parts_storage.base_rri = parent_rri; + init_result_parts_storage(&parts_storage, parent_rri, + estate, CMD_INSERT, + RPS_DEFAULT_ENTRY_SIZE, + RPS_DEFAULT_SPECULATIVE, + prepare_rri_for_copy, NULL); /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlot(estate); From b605edd3687835908eedd770c44cfd41d85b97ba Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 31 Oct 2017 14:14:18 +0300 Subject: [PATCH 200/528] Adapted for https://github.com/arssher/postgresql/tree/foreign_copy_from. 
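
Local partitions keep going through the regular heap path, while rows
routed to a foreign partition are now streamed through the extended FDW
COPY FROM API from the branch above. A rough sketch of the per-tuple
dispatch this adds to PathmanCopyFrom() (the ForeignCopyFrom callbacks
are the ones declared by that branch, not by vanilla PostgreSQL):

	if (child_result_rel->ri_FdwRoutine == NULL)
	{
		/* Local partition: store the heap tuple and update its indexes */
		simple_heap_insert(child_result_rel->ri_RelationDesc, tuple);
	}
	else
	{
		/* Foreign partition: hand the row over to the FDW's COPY stream */
		child_result_rel->ri_FdwRoutine->ForeignNextCopyFrom(
			estate, child_result_rel, cstate);
	}

BeginForeignCopyFrom() is invoked once per foreign partition when its
ResultRelInfo is first cached (see prepare_rri_for_copy), and
EndForeignCopyFrom() is invoked for each such partition after the last
row has been processed.
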
--- src/utility_stmt_hooking.c | 52 ++++++++++++++++++++++++++++++--------
 1 file changed, 42 insertions(+), 10 deletions(-)

diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index 103f194e..93908d38 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -499,7 +499,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 	/* Initialize ResultPartsStorage */
 	init_result_parts_storage(&parts_storage, estate, false,
 							  ResultPartsStorageStandard,
-							  prepare_rri_for_copy, NULL);
+							  prepare_rri_for_copy, cstate);
 	parts_storage.saved_rel_info = parent_result_rel;
 
 	/* Set up a tuple slot too */
@@ -634,13 +634,20 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 			/* Check the constraints of the tuple */
 			if (child_result_rel->ri_RelationDesc->rd_att->constr)
 				ExecConstraints(child_result_rel, slot, estate);
+			if (!child_result_rel->ri_FdwRoutine)
+			{
+				/* OK, store the tuple and create index entries for it */
+				simple_heap_insert(child_result_rel->ri_RelationDesc, tuple);
 
-			/* OK, store the tuple and create index entries for it */
-			simple_heap_insert(child_result_rel->ri_RelationDesc, tuple);
-
-			if (child_result_rel->ri_NumIndices > 0)
-				recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
-													   estate, false, NULL, NIL);
+				if (child_result_rel->ri_NumIndices > 0)
+					recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
+														   estate, false, NULL, NIL);
+			}
+			else /* FDW table */
+			{
+				child_result_rel->ri_FdwRoutine->ForeignNextCopyFrom(
+					estate, child_result_rel, cstate);
+			}
 
 			/* AFTER ROW INSERT Triggers (FIXME: NULL transition) */
 			ExecARInsertTriggersCompat(estate, child_result_rel, tuple,
@@ -677,6 +684,24 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 
 	ExecResetTupleTable(estate->es_tupleTable, false);
 
+	{
+		/* Shut down FDWs. TODO: make hook in fini_result_parts_storage? */
+		HASH_SEQ_STATUS stat;
+		ResultRelInfoHolder *rri_holder;	/* ResultRelInfo holder */
+
+		hash_seq_init(&stat, parts_storage.result_rels_table);
+		while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL)
+		{
+			ResultRelInfo *resultRelInfo = rri_holder->result_rel_info;
+
+			if (resultRelInfo->ri_FdwRoutine)
+			{
+				resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom(
+					estate, resultRelInfo);
+			}
+		}
+	}
+
 	/* Close partitions and destroy hash table */
 	fini_result_parts_storage(&parts_storage, true);
 
@@ -689,7 +714,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 }
 
 /*
- * COPY FROM does not support FDWs, emit ERROR.
+ * Init COPY FROM, if supported.
 */
 static void
 prepare_rri_for_copy(EState *estate,
@@ -699,10 +724,17 @@ prepare_rri_for_copy(EState *estate,
 {
 	ResultRelInfo *rri = rri_holder->result_rel_info;
 	FdwRoutine *fdw_routine = rri->ri_FdwRoutine;
+	CopyState	cstate = (CopyState) arg;
 
 	if (fdw_routine != NULL)
-		elog(ERROR, "cannot copy to foreign partition \"%s\"",
-			 get_rel_name(RelationGetRelid(rri->ri_RelationDesc)));
+	{
+		if (!FdwCopyFromIsSupported(fdw_routine))
+			ereport(ERROR,
+					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+					 errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM",
+							RelationGetRelationName(rri->ri_RelationDesc))));
+		rri->ri_FdwRoutine->BeginForeignCopyFrom(estate, rri, cstate);
+	}
 }
 
 /*

From 74f40cc87b4a547b6261e7b0c80277477f59bb13 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Thu, 2 Nov 2017 18:49:35 +0300
Subject: [PATCH 201/528] COPYing FROM to parent table instead of foreign,
 assuming we are using shardman. Also, callback args simplified.
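
Callbacks now receive just (rri_holder, rps_storage); the executor state
and the user argument are taken from the storage itself, so the extra
parameters could be dropped. A minimal sketch of a callback under the new
contract ('my_hook' is a made-up name; the fields are the ones declared
in partition_filter.h):

	static void
	my_hook(ResultRelInfoHolder *rri_holder,
			const ResultPartsStorage *rps_storage)
	{
		EState	   *estate = rps_storage->estate;		/* executor's state */
		void	   *arg    = rps_storage->callback_arg;	/* user-supplied arg */

		/* ... inspect or adjust rri_holder->result_rel_info ... */
	}

fini_result_parts_storage() also gains an optional destruction hook that
is fired for every cached partition before it is closed; PathmanCopyFrom()
uses it (finish_rri_copy) to call EndForeignCopyFrom() on foreign
partitions instead of walking the hash table by hand.
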
--- src/include/partition_filter.h | 16 +++---- src/partition_filter.c | 82 +++++++++++++++----------------- src/utility_stmt_hooking.c | 85 ++++++++++++++++------------------ 3 files changed, 84 insertions(+), 99 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 85ddcf91..0cd08c36 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -43,17 +43,15 @@ typedef struct } ResultRelInfoHolder; -/* Forward declaration (for on_new_rri_holder()) */ +/* Forward declaration (for on_rri_holder()) */ struct ResultPartsStorage; typedef struct ResultPartsStorage ResultPartsStorage; /* - * Callback to be fired at rri_holder creation. + * Callback to be fired at rri_holder creation/destruction. */ -typedef void (*on_new_rri_holder)(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +typedef void (*on_rri_holder)(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); /* * Cached ResultRelInfos of partitions. @@ -66,7 +64,7 @@ struct ResultPartsStorage bool speculative_inserts; /* for ExecOpenIndices() */ - on_new_rri_holder on_new_rri_holder_callback; + on_rri_holder on_new_rri_holder_callback; void *callback_arg; EState *estate; /* pointer to executor's state */ @@ -116,11 +114,11 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, EState *estate, bool speculative_inserts, Size table_entry_size, - on_new_rri_holder on_new_rri_holder_cb, + on_rri_holder on_new_rri_holder_cb, void *on_new_rri_holder_cb_arg); void fini_result_parts_storage(ResultPartsStorage *parts_storage, - bool close_rels); + bool close_rels, on_rri_holder hook); ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *storage); diff --git a/src/partition_filter.c b/src/partition_filter.c index 214b926a..a1886c4d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -68,18 +68,12 @@ CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; -static void prepare_rri_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); -static void prepare_rri_returning_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); -static void prepare_rri_fdw_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +static void prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); +static void prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); +static void prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); @@ -143,7 +137,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, EState *estate, bool speculative_inserts, Size table_entry_size, - on_new_rri_holder on_new_rri_holder_cb, + on_rri_holder on_new_rri_holder_cb, void *on_new_rri_holder_cb_arg) { HASHCTL *result_rels_table_config = &parts_storage->result_rels_table_config; @@ -177,16 +171,21 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, /* Free ResultPartsStorage (close relations etc) */ void -fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) 
+fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels,
+						  on_rri_holder hook)
 {
 	HASH_SEQ_STATUS			stat;
 	ResultRelInfoHolder	   *rri_holder; /* ResultRelInfo holder */
 
-	/* Close partitions and free free conversion-related stuff */
-	if (close_rels)
+	hash_seq_init(&stat, parts_storage->result_rels_table);
+	while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL)
 	{
-		hash_seq_init(&stat, parts_storage->result_rels_table);
-		while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL)
+		/* Call destruction hook, if needed */
+		if (hook != NULL)
+			hook(rri_holder, parts_storage);
+
+		/* Close partitions and free conversion-related stuff */
+		if (close_rels)
 		{
 			ExecCloseIndices(rri_holder->result_rel_info);
 
@@ -202,13 +201,8 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels)
 
 			free_conversion_map(rri_holder->tuple_map);
 		}
-	}
-
-	/* Else just free conversion-related stuff */
-	else
-	{
-		hash_seq_init(&stat, parts_storage->result_rels_table);
-		while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL)
+		/* Else just free conversion-related stuff */
+		else
 		{
 			/* Skip if there's no map */
 			if (!rri_holder->tuple_map)
@@ -329,10 +323,8 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage)
 
 		/* Call on_new_rri_holder_callback() if needed */
 		if (parts_storage->on_new_rri_holder_callback)
-			parts_storage->on_new_rri_holder_callback(parts_storage->estate,
-													  rri_holder,
-													  parts_storage,
-													  parts_storage->callback_arg);
+			parts_storage->on_new_rri_holder_callback(rri_holder,
+													  parts_storage);
 
 		/* Finally append ResultRelInfo to storage->es_alloc_result_rels */
 		append_rri_to_estate(parts_storage->estate, child_result_rel_info);
@@ -702,7 +694,7 @@ partition_filter_end(CustomScanState *node)
 	PartitionFilterState *state = (PartitionFilterState *) node;
 
 	/* Executor will close rels via estate->es_result_relations */
-	fini_result_parts_storage(&state->result_parts, false);
+	fini_result_parts_storage(&state->result_parts, false, NULL);
 
 	Assert(list_length(node->custom_ps) == 1);
 	ExecEndNode((PlanState *) linitial(node->custom_ps));
@@ -793,21 +785,17 @@ pfilter_build_tlist(Relation parent_rel, List *tlist)
 
 /* Main trigger */
 static void
-prepare_rri_for_insert(EState *estate,
-					   ResultRelInfoHolder *rri_holder,
-					   const ResultPartsStorage *rps_storage,
-					   void *arg)
+prepare_rri_for_insert(ResultRelInfoHolder *rri_holder,
+					   const ResultPartsStorage *rps_storage)
 {
-	prepare_rri_returning_for_insert(estate, rri_holder, rps_storage, arg);
-	prepare_rri_fdw_for_insert(estate, rri_holder, rps_storage, arg);
+	prepare_rri_returning_for_insert(rri_holder, rps_storage);
+	prepare_rri_fdw_for_insert(rri_holder, rps_storage);
 }
 
 /* Prepare 'RETURNING *' tlist & projection */
 static void
-prepare_rri_returning_for_insert(EState *estate,
-								 ResultRelInfoHolder *rri_holder,
-								 const ResultPartsStorage *rps_storage,
-								 void *arg)
+prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder,
+								 const ResultPartsStorage *rps_storage)
 {
 	PartitionFilterState   *pfstate;
 	List				   *returning_list;
 	ResultRelInfo		   *child_rri,
 						   *parent_rri;
 	Index					parent_rt_idx;
 	TupleTableSlot		   *result_slot;
+	EState				   *estate;
+
+	estate = rps_storage->estate;
 
 	/* We don't need to do anything if there's no map */
 	if (!rri_holder->tuple_map)
 		return;
 
-	pfstate = (PartitionFilterState *) arg;
+	pfstate = (PartitionFilterState *) rps_storage->callback_arg;
 	returning_list = pfstate->returning_list; 
/* Exit if there's no RETURNING list */ @@ -857,14 +848,15 @@ prepare_rri_returning_for_insert(EState *estate, /* Prepare FDW access structs */ static void -prepare_rri_fdw_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { ResultRelInfo *rri = rri_holder->result_rel_info; FdwRoutine *fdw_routine = rri->ri_FdwRoutine; Oid partid; + EState *estate; + + estate = rps_storage->estate; /* Nothing to do if not FDW */ if (fdw_routine == NULL) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 93908d38..6ad88bf6 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -64,10 +64,10 @@ static uint64 PathmanCopyFrom(CopyState cstate, List *range_table, bool old_protocol); -static void prepare_rri_for_copy(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +static void prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); +static void finish_rri_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); /* @@ -105,20 +105,6 @@ is_pathman_related_copy(Node *parsetree) /* Check that relation is partitioned */ if (get_pathman_relation_info(parent_relid)) { - ListCell *lc; - - /* Analyze options list */ - foreach (lc, copy_stmt->options) - { - DefElem *defel = (DefElem *) lfirst(lc); - - Assert(IsA(defel, DefElem)); - - /* We do not support freeze */ - if (strcmp(defel->defname, "freeze") == 0) - elog(ERROR, "freeze is not supported for partitioned tables"); - } - /* Emit ERROR if we can't see the necessary symbols */ #ifdef DISABLE_PATHMAN_COPY elog(ERROR, "COPY is not supported for partitioned tables on Windows"); @@ -481,6 +467,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, uint64 processed = 0; + /* We do not support freeze */ + if (cstate->freeze) + elog(ERROR, "freeze is not supported for partitioned tables"); + tupDesc = RelationGetDescr(parent_rel); @@ -684,26 +674,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ExecResetTupleTable(estate->es_tupleTable, false); - { - /* Shut down FDWs. TODO: make hook in fini_result_parts_storage? */ - HASH_SEQ_STATUS stat; - ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ - - hash_seq_init(&stat, parts_storage.result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) - { - ResultRelInfo *resultRelInfo = rri_holder->result_rel_info; - - if (resultRelInfo->ri_FdwRoutine) - { - resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom( - estate, resultRelInfo); - } - } - } - /* Close partitions and destroy hash table */ - fini_result_parts_storage(&parts_storage, true); + fini_result_parts_storage(&parts_storage, true, finish_rri_copy); /* Close parent's indices */ ExecCloseIndices(parent_result_rel); @@ -717,23 +689,46 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, * Init COPY FROM, if supported. 
*/
 static void
-prepare_rri_for_copy(EState *estate,
-					 ResultRelInfoHolder *rri_holder,
-					 const ResultPartsStorage *rps_storage,
-					 void *arg)
+prepare_rri_for_copy(ResultRelInfoHolder *rri_holder,
+					 const ResultPartsStorage *rps_storage)
 {
-	ResultRelInfo *rri = rri_holder->result_rel_info;
-	FdwRoutine *fdw_routine = rri->ri_FdwRoutine;
-	CopyState cstate = (CopyState) arg;
+	ResultRelInfo  *rri = rri_holder->result_rel_info;
+	FdwRoutine	   *fdw_routine = rri->ri_FdwRoutine;
+	CopyState		cstate = (CopyState) rps_storage->callback_arg;
+	ResultRelInfo  *parent_rri;
+	const char	   *parent_relname;
+	EState		   *estate;
+
+	estate = rps_storage->estate;
 
 	if (fdw_routine != NULL)
 	{
+		parent_rri = rps_storage->saved_rel_info;
+		parent_relname = psprintf(
+			"%s.%s", "public",
+			quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc)));
 		if (!FdwCopyFromIsSupported(fdw_routine))
 			ereport(ERROR,
 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
 					 errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM",
 							RelationGetRelationName(rri->ri_RelationDesc))));
-		rri->ri_FdwRoutine->BeginForeignCopyFrom(estate, rri, cstate);
+		fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname);
+	}
+}
+
+/*
+ * Shut down FDWs.
+ */
+static void
+finish_rri_copy(ResultRelInfoHolder *rri_holder,
+				const ResultPartsStorage *rps_storage)
+{
+	ResultRelInfo *resultRelInfo = rri_holder->result_rel_info;
+
+	if (resultRelInfo->ri_FdwRoutine)
+	{
+		resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom(
+			rps_storage->estate, resultRelInfo);
 	}
 }
 
From 551f4f23dfbfe67dd04b77b823c2ba74dda72b66 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Fri, 1 Dec 2017 12:26:59 +0300
Subject: [PATCH 202/528] Do COPY FROM to foreign parts only when needed.

That is, when
1) pg_pathman was compiled against postgres with shardman patches.
2) Shardman's COPY FROM was explicitly requested by setting the
   rendezvous var.

Also, check for 'freeze' option early, as before, to keep regression
tests as they are.
---
 src/utility_stmt_hooking.c | 88 +++++++++++++++++++++++++++++---------
 1 file changed, 67 insertions(+), 21 deletions(-)

diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index 6ad88bf6..e8ddd3de 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -22,6 +22,7 @@
 #include "access/xact.h"
 #include "catalog/namespace.h"
 #include "commands/copy.h"
+#include "commands/defrem.h"
 #include "commands/trigger.h"
 #include "commands/tablecmds.h"
 #include "foreign/fdwapi.h"
@@ -105,6 +106,26 @@ is_pathman_related_copy(Node *parsetree)
 	/* Check that relation is partitioned */
 	if (get_pathman_relation_info(parent_relid))
 	{
+		ListCell   *lc;
+
+		/* Analyze options list */
+		foreach (lc, copy_stmt->options)
+		{
+			DefElem    *defel = lfirst_node(DefElem, lc);
+
+			/* We do not support freeze */
+			/*
+			 * It would be great to let copy.c extract the option value and
+			 * check it for us. However, there is no way (no hooks) to do
+			 * that before messaging 'ok, begin streaming data' to the client,
+			 * which is ugly and confusing: e.g. it would require us to
+			 * actually send something in regression tests before we notice
+			 * the error. 
+ */ + if (strcmp(defel->defname, "freeze") == 0 && defGetBoolean(defel)) + elog(ERROR, "freeze is not supported for partitioned tables"); + } + /* Emit ERROR if we can't see the necessary symbols */ #ifdef DISABLE_PATHMAN_COPY elog(ERROR, "COPY is not supported for partitioned tables on Windows"); @@ -467,11 +488,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, uint64 processed = 0; - /* We do not support freeze */ - if (cstate->freeze) - elog(ERROR, "freeze is not supported for partitioned tables"); - - tupDesc = RelationGetDescr(parent_rel); parent_result_rel = makeNode(ResultRelInfo); @@ -633,11 +649,13 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, NIL); } +#ifdef PG_SHARDMAN else /* FDW table */ { child_result_rel->ri_FdwRoutine->ForeignNextCopyFrom( estate, child_result_rel, cstate); } +#endif /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ ExecARInsertTriggersCompat(estate, child_result_rel, tuple, @@ -694,25 +712,51 @@ prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, { ResultRelInfo *rri = rri_holder->result_rel_info; FdwRoutine *fdw_routine = rri->ri_FdwRoutine; - CopyState cstate = (CopyState) rps_storage->callback_arg; - ResultRelInfo *parent_rri; - const char *parent_relname; - EState *estate; - - estate = rps_storage->estate; if (fdw_routine != NULL) { - parent_rri = rps_storage->saved_rel_info; - parent_relname = psprintf( - "%s.%s", "public", - quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc))); - if (!FdwCopyFromIsSupported(fdw_routine)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM", - RelationGetRelationName(rri->ri_RelationDesc)))); - fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname); + /* + * If this Postgres has no idea about shardman, behave as usual: + * vanilla Postgres doesn't support COPY FROM to foreign partitions. + * However, shardman patches to core extend FDW API to allow it, + * though currently postgres_fdw does so in a bit perverted way: we + * redirect COPY FROM to parent table on foreign server, assuming it + * exists, and let it direct tuple to proper partition. This is + * because otherwise we have to modify logic of managing connections + * in postgres_fdw and keep many connections open to one server from + * one backend. + */ +#ifndef PG_SHARDMAN + goto bail_out; /* to avoid 'unused label' warning */ +#else + { /* separate block to avoid 'unused var' warnings */ + CopyState cstate = (CopyState) rps_storage->callback_arg; + ResultRelInfo *parent_rri; + const char *parent_relname; + EState *estate; + + /* shardman COPY FROM requested? 
*/
+			if (*find_rendezvous_variable(
+					"shardman_pathman_copy_from_rendezvous") == NULL)
+				goto bail_out;
+
+			estate = rps_storage->estate;
+			parent_rri = rps_storage->saved_rel_info;
+			parent_relname = psprintf(
+				"%s.%s", "public",
+				quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc)));
+			if (!FdwCopyFromIsSupported(fdw_routine))
+				ereport(ERROR,
+						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+						 errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM",
+								RelationGetRelationName(rri->ri_RelationDesc))));
+			fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname);
+			return;
+		}
+#endif
+bail_out:
+		elog(ERROR, "cannot copy to foreign partition \"%s\"",
+			 get_rel_name(RelationGetRelid(rri->ri_RelationDesc)));
 	}
 }
 
 /*
@@ -723,6 +767,7 @@ static void
 finish_rri_copy(ResultRelInfoHolder *rri_holder,
 				const ResultPartsStorage *rps_storage)
 {
+#ifdef PG_SHARDMAN
 	ResultRelInfo *resultRelInfo = rri_holder->result_rel_info;
 
 	if (resultRelInfo->ri_FdwRoutine)
 	{
 		resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom(
 			rps_storage->estate, resultRelInfo);
 	}
+#endif
 }
 
From d7520bbf4d50e514a88b5925daf3989219ded480 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Fri, 1 Dec 2017 16:21:36 +0300
Subject: [PATCH 203/528] Code simplified and improved a bit.

---
 src/utility_stmt_hooking.c | 44 +++++++++++---------------------------
 1 file changed, 12 insertions(+), 32 deletions(-)

diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index e8ddd3de..e64c1542 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -718,43 +718,23 @@ prepare_rri_for_copy(ResultRelInfoHolder *rri_holder,
 		/*
 		 * If this Postgres has no idea about shardman, behave as usual:
 		 * vanilla Postgres doesn't support COPY FROM to foreign partitions.
-		 * However, shardman patches to core extend FDW API to allow it,
-		 * though currently postgres_fdw does so in a bit perverted way: we
-		 * redirect COPY FROM to parent table on foreign server, assuming it
-		 * exists, and let it direct tuple to proper partition. This is
-		 * because otherwise we have to modify logic of managing connections
-		 * in postgres_fdw and keep many connections open to one server from
-		 * one backend.
+		 * However, shardman patches to core extend FDW API to allow it.
 		 */
-#ifndef PG_SHARDMAN
-		goto bail_out; /* to avoid 'unused label' warning */
-#else
-		{ /* separate block to avoid 'unused var' warnings */
+#ifdef PG_SHARDMAN
+		/* shardman COPY FROM requested? */
+		if (*find_rendezvous_variable(
+				"shardman_pathman_copy_from_rendezvous") != NULL &&
+			FdwCopyFromIsSupported(fdw_routine))
+		{
 			CopyState cstate = (CopyState) rps_storage->callback_arg;
-			ResultRelInfo *parent_rri;
-			const char *parent_relname;
-			EState *estate;
-
-			/* shardman COPY FROM requested? 
*/ - if (*find_rendezvous_variable( - "shardman_pathman_copy_from_rendezvous") == NULL) - goto bail_out; - - estate = rps_storage->estate; - parent_rri = rps_storage->saved_rel_info; - parent_relname = psprintf( - "%s.%s", "public", - quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc))); - if (!FdwCopyFromIsSupported(fdw_routine)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM", - RelationGetRelationName(rri->ri_RelationDesc)))); - fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname); + ResultRelInfo *parent_rri = rps_storage->saved_rel_info; + EState *estate = rps_storage->estate; + + fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_rri); return; } #endif -bail_out: + elog(ERROR, "cannot copy to foreign partition \"%s\"", get_rel_name(RelationGetRelid(rri->ri_RelationDesc))); } From 6a1ad9a48a4e5dbfc7ddf5d99d8236c3316182c9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 6 Dec 2017 15:40:13 +0300 Subject: [PATCH 204/528] improve support for ONLY in select, insert, delete etc --- Makefile | 2 +- expected/pathman_only.out | 29 ++- expected/pathman_only_1.out | 247 -------------------- src/compat/relation_tags.c | 251 --------------------- src/hooks.c | 20 +- src/include/compat/debug_compat_features.h | 1 - src/include/compat/relation_tags.h | 78 ------- src/include/planner_tree_modification.h | 12 +- src/planner_tree_modification.c | 93 ++++---- 9 files changed, 93 insertions(+), 640 deletions(-) delete mode 100644 expected/pathman_only_1.out delete mode 100644 src/compat/relation_tags.c delete mode 100644 src/include/compat/relation_tags.h diff --git a/Makefile b/Makefile index 9e036208..79f674ec 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ - src/compat/pg_compat.o src/compat/relation_tags.o src/compat/rowmarks_fix.o \ + src/compat/pg_compat.o src/compat/rowmarks_fix.o \ $(WIN32RES) ifdef USE_PGXS diff --git a/expected/pathman_only.out b/expected/pathman_only.out index f90dc56e..28471cf3 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -137,7 +137,34 @@ UNION SELECT * FROM test_only.from_only_test; EXPLAIN (COSTS OFF) SELECT * FROM test_only.from_only_test a JOIN ONLY test_only.from_only_test b USING(val); -ERROR: it is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + /* should be OK */ EXPLAIN 
(COSTS OFF) WITH q1 AS (SELECT * FROM test_only.from_only_test), diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out deleted file mode 100644 index 77fc0dc5..00000000 --- a/expected/pathman_only_1.out +++ /dev/null @@ -1,247 +0,0 @@ -/* - * --------------------------------------------- - * NOTE: This test behaves differenly on PgPro - * --------------------------------------------- - */ -\set VERBOSITY terse -SET search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_only; -/* Test special case: ONLY statement with not-ONLY for partitioned table */ -CREATE TABLE test_only.from_only_test(val INT NOT NULL); -INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); -SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); -NOTICE: sequence "from_only_test_seq" does not exist, skipping - create_range_partitions -------------------------- - 10 -(1 row) - -VACUUM ANALYZE; -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM ONLY test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test; - QUERY PLAN -------------------------------------------------- - HashAggregate - Group Key: from_only_test.val - -> Append - -> Seq Scan on from_only_test - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 -(15 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test -UNION SELECT * FROM ONLY test_only.from_only_test; - QUERY PLAN -------------------------------------------------- - HashAggregate - Group Key: from_only_test_1.val - -> Append - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Seq Scan on from_only_test -(15 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test -UNION SELECT * FROM ONLY test_only.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- - HashAggregate - Group Key: from_only_test_1.val - -> Append - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 - -> Seq Scan on from_only_test -(26 rows) - -/* should be OK */ -EXPLAIN 
(COSTS OFF) -SELECT * FROM ONLY test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- - HashAggregate - Group Key: from_only_test.val - -> Append - -> Seq Scan on from_only_test - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 -(26 rows) - -/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test a -JOIN ONLY test_only.from_only_test b USING(val); - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Seq Scan on from_only_test b - -> Custom Scan (RuntimeAppend) - -> Seq Scan on from_only_test_1 a - -> Seq Scan on from_only_test_2 a - -> Seq Scan on from_only_test_3 a - -> Seq Scan on from_only_test_4 a - -> Seq Scan on from_only_test_5 a - -> Seq Scan on from_only_test_6 a - -> Seq Scan on from_only_test_7 a - -> Seq Scan on from_only_test_8 a - -> Seq Scan on from_only_test_9 a - -> Seq Scan on from_only_test_10 a -(13 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM test_only.from_only_test), - q2 AS (SELECT * FROM ONLY test_only.from_only_test) -SELECT * FROM q1 JOIN q2 USING(val); - QUERY PLAN ---------------------------------------------- - Hash Join - Hash Cond: (q1.val = q2.val) - CTE q1 - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - CTE q2 - -> Seq Scan on from_only_test - -> CTE Scan on q1 - -> Hash - -> CTE Scan on q2 -(19 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) -SELECT * FROM test_only.from_only_test JOIN q1 USING(val); - QUERY PLAN ----------------------------------------------------------- - Nested Loop - CTE q1 - -> Seq Scan on from_only_test from_only_test_1 - -> CTE Scan on q1 - -> Custom Scan (RuntimeAppend) - -> Seq Scan on from_only_test_1 from_only_test - -> Seq Scan on from_only_test_2 from_only_test - -> Seq Scan on from_only_test_3 from_only_test - -> Seq Scan on from_only_test_4 from_only_test - -> Seq Scan on from_only_test_5 from_only_test - -> Seq Scan on from_only_test_6 from_only_test - -> Seq Scan on from_only_test_7 from_only_test - -> Seq Scan on from_only_test_8 from_only_test - -> Seq Scan on from_only_test_9 from_only_test - -> Seq Scan on from_only_test_10 from_only_test -(15 rows) 
- -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test -WHERE val = (SELECT val FROM ONLY test_only.from_only_test - ORDER BY val ASC - LIMIT 1); - QUERY PLAN ------------------------------------------------------------------ - Custom Scan (RuntimeAppend) - InitPlan 1 (returns $0) - -> Limit - -> Sort - Sort Key: from_only_test_1.val - -> Seq Scan on from_only_test from_only_test_1 - -> Seq Scan on from_only_test_1 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_2 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_3 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_4 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_5 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_6 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_7 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_8 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_9 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_10 from_only_test - Filter: (val = $0) -(26 rows) - -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects -DROP EXTENSION pg_pathman; diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c deleted file mode 100644 index 383dd1f5..00000000 --- a/src/compat/relation_tags.c +++ /dev/null @@ -1,251 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * relation_tags.c - * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry - * NOTE: implementations for vanilla and PostgresPro differ - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#include "compat/relation_tags.h" -#include "planner_tree_modification.h" - -#include "nodes/nodes.h" - - -#ifndef NATIVE_RELATION_TAGS - -/* - * This table is used to ensure that partitioned relation - * cant't be referenced as ONLY and non-ONLY at the same time. - */ -static HTAB *per_table_relation_tags = NULL; - -/* - * Single row of 'per_table_relation_tags'. - * NOTE: do not reorder these fields. - */ -typedef struct -{ - Oid relid; /* key (part #1) */ - uint32 queryId; /* key (part #2) */ - List *relation_tags; -} relation_tags_entry; - -#endif - -/* Also used in get_refcount_relation_tags() etc... */ -static int per_table_relation_tags_refcount = 0; - - - -/* Look through RTE's relation tags */ -List * -rte_fetch_tag(const uint32 query_id, - const RangeTblEntry *rte, - const char *key) -{ -#ifdef NATIVE_RELATION_TAGS - - return relation_tags_search(rte->custom_tags, key); - -#else - - relation_tags_entry *htab_entry, - htab_key = { rte->relid, query_id, NIL /* unused */ }; - - /* Skip if table is not initialized */ - if (per_table_relation_tags) - { - /* Search by 'htab_key' */ - htab_entry = hash_search(per_table_relation_tags, - &htab_key, HASH_FIND, NULL); - - if (htab_entry) - return relation_tags_search(htab_entry->relation_tags, key); - } - - /* Not found, return stub value */ - return NIL; - -#endif -} - -/* Attach new relation tag to RTE. Returns KVP with duplicate key. 
*/ -List * -rte_attach_tag(const uint32 query_id, - RangeTblEntry *rte, - List *key_value_pair) -{ - /* Common variables */ - MemoryContext old_mcxt; - const char *current_key; - List *existing_kvp, - *temp_tags; /* rte->custom_tags OR - htab_entry->relation_tags */ - -#ifdef NATIVE_RELATION_TAGS - - /* Load relation tags to 'temp_tags' */ - temp_tags = rte->custom_tags; - -#else - - relation_tags_entry *htab_entry, - htab_key = { rte->relid, query_id, NIL /* unused */ }; - bool found; - - /* We prefer to initialize this table lazily */ - if (!per_table_relation_tags) - { - const long start_elems = 50; - HASHCTL hashctl; - - memset(&hashctl, 0, sizeof(HASHCTL)); - hashctl.entrysize = sizeof(relation_tags_entry); - hashctl.keysize = offsetof(relation_tags_entry, relation_tags); - hashctl.hcxt = RELATION_TAG_MCXT; - - per_table_relation_tags = hash_create("Custom tags for RangeTblEntry", - start_elems, &hashctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - } - - /* Search by 'htab_key' */ - htab_entry = hash_search(per_table_relation_tags, - &htab_key, HASH_ENTER, &found); - - /* Don't forget to initialize list! */ - if (!found) - htab_entry->relation_tags = NIL; - - /* Load relation tags to 'temp_tags' */ - temp_tags = htab_entry->relation_tags; - -#endif - - /* Check that 'key_value_pair' is valid */ - AssertArg(key_value_pair && list_length(key_value_pair) == 2); - - /* Extract key of this KVP */ - rte_deconstruct_tag(key_value_pair, ¤t_key, NULL); - - /* Check if KVP with such key already exists */ - existing_kvp = relation_tags_search(temp_tags, current_key); - if (existing_kvp) - return existing_kvp; /* return KVP with duplicate key */ - - /* Add this KVP to relation tags list */ - old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); - temp_tags = lappend(temp_tags, key_value_pair); - MemoryContextSwitchTo(old_mcxt); - -/* Finally store 'temp_tags' to relation tags list */ -#ifdef NATIVE_RELATION_TAGS - rte->custom_tags = temp_tags; -#else - htab_entry->relation_tags = temp_tags; -#endif - - /* Success! */ - return NIL; -} - - - -/* Extract key & value from 'key_value_pair' */ -void -rte_deconstruct_tag(const List *key_value_pair, - const char **key, /* ret value #1 */ - const Value **value) /* ret value #2 */ -{ - const char *r_key; - const Value *r_value; - - AssertArg(key_value_pair && list_length(key_value_pair) == 2); - - r_key = (const char *) strVal(linitial(key_value_pair)); - r_value = (const Value *) lsecond(key_value_pair); - - /* Check that 'key' is valid */ - Assert(IsA(linitial(key_value_pair), String)); - - /* Check that 'value' is valid or NULL */ - Assert(r_value == NULL || - IsA(r_value, Integer) || - IsA(r_value, Float) || - IsA(r_value, String)); - - /* Finally return key & value */ - if (key) *key = r_key; - if (value) *value = r_value; -} - -/* Search through list of 'relation_tags' */ -List * -relation_tags_search(List *relation_tags, const char *key) -{ - ListCell *lc; - - AssertArg(key); - - /* Scan KVP list */ - foreach (lc, relation_tags) - { - List *current_kvp = (List *) lfirst(lc); - const char *current_key; - - /* Extract key of this KVP */ - rte_deconstruct_tag(current_kvp, ¤t_key, NULL); - - /* Check if this is the KVP we're looking for */ - if (strcmp(key, current_key) == 0) - return current_kvp; - } - - /* Nothing! 
*/ - return NIL; -} - - - -/* Increate usage counter by 1 */ -void -incr_refcount_relation_tags(void) -{ - /* Increment reference counter */ - if (++per_table_relation_tags_refcount <= 0) - elog(WARNING, "imbalanced %s", - CppAsString(incr_refcount_relation_tags)); -} - -/* Return current value of usage counter */ -uint32 -get_refcount_relation_tags(void) -{ - /* incr_refcount_parenthood_statuses() is called by pathman_planner_hook() */ - return per_table_relation_tags_refcount; -} - -/* Reset all cached statuses if needed (query end) */ -void -decr_refcount_relation_tags(void) -{ - /* Decrement reference counter */ - if (--per_table_relation_tags_refcount < 0) - elog(WARNING, "imbalanced %s", - CppAsString(decr_refcount_relation_tags)); - - /* Free resources if no one is using them */ - if (per_table_relation_tags_refcount == 0) - { - reset_query_id_generator(); - -#ifndef NATIVE_RELATION_TAGS - hash_destroy(per_table_relation_tags); - per_table_relation_tags = NULL; -#endif - } -} diff --git a/src/hooks.c b/src/hooks.c index 3503f857..ebd35b61 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -11,7 +11,6 @@ */ #include "compat/pg_compat.h" -#include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" #include "hooks.h" @@ -153,8 +152,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, } /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, - inner_rte)) + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) return; /* @@ -340,7 +338,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, #endif /* Skip if this table is not allowed to act as parent (e.g. FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(rte)) return; /* Proceed iff relation 'rel' is partitioned */ @@ -626,8 +624,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { if (pathman_ready) { - /* Increment relation tags refcount */ - incr_refcount_relation_tags(); + /* Increase planner() calls count */ + incr_planner_calls_count(); /* Modify query tree if needed */ pathman_transform_query(parse, boundParams); @@ -644,8 +642,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Decrement relation tags refcount */ - decr_refcount_relation_tags(); + /* Decrement planner() calls count */ + decr_planner_calls_count(); /* HACK: restore queryId set by pg_stat_statements */ result->queryId = query_id; @@ -656,8 +654,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { if (pathman_ready) { - /* Caught an ERROR, decrease refcount */ - decr_refcount_relation_tags(); + /* Caught an ERROR, decrease count */ + decr_planner_calls_count(); } /* Rethrow ERROR further */ @@ -735,7 +733,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) } /* Process inlined SQL functions (we've already entered planning stage) */ - if (IsPathmanReady() && get_refcount_relation_tags() > 0) + if (IsPathmanReady() && get_planner_calls_count() > 0) { /* Check that pg_pathman is the last extension loaded */ if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h index 8caa6d44..09f12849 100644 --- 
a/src/include/compat/debug_compat_features.h +++ b/src/include/compat/debug_compat_features.h @@ -12,5 +12,4 @@ #define ENABLE_PGPRO_PATCHES /* PgPro exclusive features */ -//#define ENABLE_RELATION_TAGS #define ENABLE_PATHMAN_AWARE_COPY_WIN32 diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h deleted file mode 100644 index d5183d32..00000000 --- a/src/include/compat/relation_tags.h +++ /dev/null @@ -1,78 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * relation_tags.h - * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry - * - * NOTE: implementations for vanilla and PostgresPro differ, - * which means that subquery pull-up might break the bond - * between a RangeTblEntry and the corresponding KVPs. - * - * This subsystem was meant to replace the broken 'inh' flag - * (see get_rel_parenthood_status() for more details). - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef RELATION_TAGS_H -#define RELATION_TAGS_H - -#include "compat/debug_compat_features.h" - -#include "postgres.h" -#include "nodes/relation.h" -#include "nodes/value.h" -#include "utils/memutils.h" - - -/* Does RTE contain 'custom_tags' list? */ -/* TODO: fix this definition once PgPro contains 'relation_tags' patch */ -#if defined(ENABLE_PGPRO_PATCHES) && \ - defined(ENABLE_RELATION_TAGS) /* && ... */ -#define NATIVE_RELATION_TAGS -#endif - -/* Memory context we're going to use for tags */ -#define RELATION_TAG_MCXT TopTransactionContext - - -/* Safe TAG constructor (Integer) */ -static inline List * -make_rte_tag_int(char *key, int value) -{ - List *kvp; - MemoryContext old_mcxt; - - /* Allocate TAG in a persistent memory context */ - old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); - kvp = list_make2(makeString(key), makeInteger(value)); - MemoryContextSwitchTo(old_mcxt); - - return kvp; -} - - -List *rte_fetch_tag(const uint32 query_id, - const RangeTblEntry *rte, - const char *key); - -List *rte_attach_tag(const uint32 query_id, - RangeTblEntry *rte, - List *key_value_pair); - - -List *relation_tags_search(List *custom_tags, - const char *key); - -void rte_deconstruct_tag(const List *key_value_pair, - const char **key, - const Value **value); - - -void incr_refcount_relation_tags(void); -uint32 get_refcount_relation_tags(void); -void decr_refcount_relation_tags(void); - - -#endif /* RELATION_TAGS_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index eee1ea76..b56b6734 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -44,12 +44,16 @@ typedef enum PARENTHOOD_ALLOWED /* children are enabled (default) */ } rel_parenthood_status; -void assign_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte, +void assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status); -rel_parenthood_status get_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte); +rel_parenthood_status get_rel_parenthood_status(RangeTblEntry *rte); + + +/* used to determine nested planner() calls */ +void incr_planner_calls_count(void); +void decr_planner_calls_count(void); +int32 get_planner_calls_count(void); #endif /* PLANNER_TREE_MODIFICATION_H */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 77540d95..9c449e00 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ 
-10,7 +10,6 @@ * ------------------------------------------------------------------------ */ -#include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" #include "partition_filter.h" @@ -103,8 +102,6 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); -static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); - static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); @@ -353,13 +350,11 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ - assign_rel_parenthood_status(parse->queryId, rte, - PARENTHOOD_ALLOWED); + assign_rel_parenthood_status(rte, PARENTHOOD_ALLOWED); } } /* Else try marking it using PARENTHOOD_DISALLOWED */ - else assign_rel_parenthood_status(parse->queryId, rte, - PARENTHOOD_DISALLOWED); + else assign_rel_parenthood_status(rte, PARENTHOOD_DISALLOWED); } } @@ -567,59 +562,34 @@ partition_filter_visitor(Plan *plan, void *context) * ----------------------------------------------- */ +#define RPS_STATUS_ASSIGNED ( (uint32) (1 << 31) ) +#define RPS_ENABLE_PARENT ( (uint32) (1 << 30) ) + /* Set parenthood status (per query level) */ void -assign_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte, +assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status) { - - List *old_relation_tag; - - old_relation_tag = rte_attach_tag(query_id, rte, - make_rte_tag_int(PARENTHOOD_TAG, - new_status)); - - /* We already have a PARENTHOOD_TAG, examine it's value */ - if (old_relation_tag && - tag_extract_parenthood_status(old_relation_tag) != new_status) - { - elog(ERROR, - "it is prohibited to apply ONLY modifier to partitioned " - "tables which have already been mentioned without ONLY"); - } + /* HACK: set relevant bits in RTE */ + rte->requiredPerms |= RPS_STATUS_ASSIGNED; + if (new_status == PARENTHOOD_ALLOWED) + rte->requiredPerms |= RPS_ENABLE_PARENT; } /* Get parenthood status (per query level) */ rel_parenthood_status -get_rel_parenthood_status(uint32 query_id, RangeTblEntry *rte) +get_rel_parenthood_status(RangeTblEntry *rte) { - List *relation_tag; - - relation_tag = rte_fetch_tag(query_id, rte, PARENTHOOD_TAG); - if (relation_tag) - return tag_extract_parenthood_status(relation_tag); + /* HACK: check relevant bits in RTE */ + if (rte->requiredPerms & RPS_STATUS_ASSIGNED) + return (rte->requiredPerms & RPS_ENABLE_PARENT) ? 
+ PARENTHOOD_ALLOWED : + PARENTHOOD_DISALLOWED; /* Not found, return stub value */ return PARENTHOOD_NOT_SET; } -static rel_parenthood_status -tag_extract_parenthood_status(List *relation_tag) -{ - const Value *value; - rel_parenthood_status status; - - rte_deconstruct_tag(relation_tag, NULL, &value); - Assert(value && IsA(value, Integer)); - - status = (rel_parenthood_status) intVal(value); - Assert(status >= PARENTHOOD_NOT_SET && - status <= PARENTHOOD_ALLOWED); - - return status; -} - /* Replace extern param nodes with consts */ static Node * @@ -678,3 +648,34 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) return expression_tree_mutator(node, eval_extern_params_mutator, (void *) params); } + + +/* + * ----------------------------------------------- + * Count number of times we've visited planner() + * ----------------------------------------------- + */ + +static int32 planner_calls = 0; + +void +incr_planner_calls_count(void) +{ + Assert(planner_calls < INT32_MAX); + + planner_calls++; +} + +void +decr_planner_calls_count(void) +{ + Assert(planner_calls > 0); + + planner_calls--; +} + +int32 +get_planner_calls_count(void) +{ + return planner_calls; +} From feb446317cfb29b028325e75faf221f40c1c7a50 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 6 Dec 2017 16:41:03 +0300 Subject: [PATCH 205/528] attempt to fix issue #134 --- src/planner_tree_modification.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9c449e00..3f504217 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -562,27 +562,31 @@ partition_filter_visitor(Plan *plan, void *context) * ----------------------------------------------- */ -#define RPS_STATUS_ASSIGNED ( (uint32) (1 << 31) ) -#define RPS_ENABLE_PARENT ( (uint32) (1 << 30) ) +#define RPS_STATUS_ASSIGNED ( (Index) 0x2 ) +#define RPS_ENABLE_PARENT ( (Index) 0x1 ) /* Set parenthood status (per query level) */ void assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status) -{ +{ + Assert(rte->rtekind != RTE_CTE); + /* HACK: set relevant bits in RTE */ - rte->requiredPerms |= RPS_STATUS_ASSIGNED; + rte->ctelevelsup |= RPS_STATUS_ASSIGNED; if (new_status == PARENTHOOD_ALLOWED) - rte->requiredPerms |= RPS_ENABLE_PARENT; + rte->ctelevelsup |= RPS_ENABLE_PARENT; } /* Get parenthood status (per query level) */ rel_parenthood_status get_rel_parenthood_status(RangeTblEntry *rte) { + Assert(rte->rtekind != RTE_CTE); + /* HACK: check relevant bits in RTE */ - if (rte->requiredPerms & RPS_STATUS_ASSIGNED) - return (rte->requiredPerms & RPS_ENABLE_PARENT) ? + if (rte->ctelevelsup & RPS_STATUS_ASSIGNED) + return (rte->ctelevelsup & RPS_ENABLE_PARENT) ? 
PARENTHOOD_ALLOWED : PARENTHOOD_DISALLOWED; From 12c86afdc0a26cfcf4668583ef6fb38944bc1a81 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 6 Dec 2017 18:45:56 +0300 Subject: [PATCH 206/528] add a few more tests regarding #134 --- expected/pathman_inserts.out | 30 ++++++++++++++++++++++++++++++ expected/pathman_inserts_1.out | 30 ++++++++++++++++++++++++++++++ sql/pathman_inserts.sql | 19 +++++++++++++++++++ 3 files changed, 79 insertions(+) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index c3a8566f..d1dbf005 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -1032,6 +1032,36 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ ERROR: cannot spawn a partition DROP TABLE test_inserts.test_gap CASCADE; NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects DROP SCHEMA test_inserts CASCADE; NOTICE: drop cascades to 19 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index 9f8633ab..8029a0a7 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -1032,6 +1032,36 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ ERROR: cannot spawn a partition DROP TABLE test_inserts.test_gap CASCADE; NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects DROP SCHEMA test_inserts CASCADE; NOTICE: drop cascades to 19 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 7653a3e6..0f4859c4 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -199,5 +199,24 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ DROP TABLE test_inserts.test_gap CASCADE; +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; +DROP TABLE test_inserts.special_1; + +/* insert into ... select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; +DROP TABLE test_inserts.special_2; + +DROP TABLE test_inserts.test_special_only CASCADE; + + DROP SCHEMA test_inserts CASCADE; DROP EXTENSION pg_pathman CASCADE; From 13dd68e8071279d71e05a9f882bd0467b6cd7d00 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 7 Dec 2017 12:44:42 +0300 Subject: [PATCH 207/528] bump lib version to 1.4.9 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 2718a8da..4922b21a 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.8", + "version": "1.4.9", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.8", + "version": "1.4.9", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7c090761..121880ea 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10408 + 10409 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index a2f7ec77..4ce40e4f 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010408 +#define CURRENT_LIB_VERSION 0x010409 void *pathman_cache_search_relid(HTAB *cache_table, From b7f4ac672668bfff63b2f9219d7c183ff7d989bb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 19 Dec 2017 16:58:04 +0300 Subject: [PATCH 208/528] replace obsolete info in 
README.md

---
 README.md | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 3f3a80ba..71a3895b 100644
--- a/README.md
+++ b/README.md
@@ -165,13 +165,9 @@ Stops a background worker performing a concurrent partitioning task. Note: worke
 ### Triggers
 ```plpgsql
-create_hash_update_trigger(parent REGCLASS)
+create_update_triggers(parent REGCLASS)
 ```
-Creates the trigger on UPDATE for HASH partitions. The UPDATE trigger isn't created by default because of the overhead. It's useful in cases when the partitioning expression's value might change.
-```plpgsql
-create_range_update_trigger(parent REGCLASS)
-```
-Same as above, but for a RANGE-partitioned table.
+Creates a for-each-row trigger to enable cross-partition UPDATE on a table partitioned by HASH/RANGE. The trigger is not created automatically because of the overhead caused by its function. You don't have to use this feature unless the partitioning key might change during an UPDATE.

 ### Post-creation partition management
 ```plpgsql

From f7fe78c3f420bef04bc6aff321f0b3589429b3b1 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 29 Jan 2018 18:20:08 +0300
Subject: [PATCH 209/528] change INT32_MAX to PG_INT32_MAX for Windows

---
 src/planner_tree_modification.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index 3f504217..0df4fc22 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -665,7 +665,7 @@ static int32 planner_calls = 0;
 void
 incr_planner_calls_count(void)
 {
-	Assert(planner_calls < INT32_MAX);
+	Assert(planner_calls < PG_INT32_MAX);

 	planner_calls++;
 }

From bc504d43828d471876ec4b3731be66df487cfef2 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 30 Jan 2018 17:54:41 +0300
Subject: [PATCH 210/528] replace lfirst_node() with lfirst() for the sake of 9.5.5 (issue #142)

---
 src/utility_stmt_hooking.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index e64c1542..30301cb2 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -111,7 +111,7 @@ is_pathman_related_copy(Node *parsetree)
 	/* Analyze options list */
 	foreach (lc, copy_stmt->options)
 	{
-		DefElem *defel = lfirst_node(DefElem, lc);
+		DefElem *defel = (DefElem *) lfirst(lc);

 		/* We do not support freeze */
 		/*

From 945f224c4ac97f084bb8a8d51d39d8c301eb2967 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 20 Feb 2018 14:17:59 +0300
Subject: [PATCH 211/528] update README.md

---
 README.md | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 71a3895b..99d2cc9f 100644
--- a/README.md
+++ b/README.md
@@ -12,10 +12,10 @@ The extension is compatible with:
 * Postgres Pro Standard 9.5, 9.6;
 * Postgres Pro Enterprise;

-By the way, we have a growing Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki).
+Take a look at our [Wiki](https://github.com/postgrespro/pg_pathman/wiki).

 ## Overview
-**Partitioning** means splitting one large table into smaller pieces. 
Each row in such a table is moved to a single partition according to the partitioning key. PostgreSQL <= 10 supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT:

```plpgsql
CREATE TABLE test (id SERIAL PRIMARY KEY, title TEXT);
@@ -23,6 +23,16 @@ CREATE TABLE test_1 (CHECK ( id >= 100 AND id < 200 )) INHERITS (test);
CREATE TABLE test_2 (CHECK ( id >= 200 AND id < 300 )) INHERITS (test);
```

+PostgreSQL 10 provides native partitioning:
+
+```plpgsql
+CREATE TABLE test(id int4, value text) PARTITION BY RANGE(id);
+CREATE TABLE test_1 PARTITION OF test FOR VALUES FROM (1) TO (10);
+CREATE TABLE test_2 PARTITION OF test FOR VALUES FROM (10) TO (20);
+```
+
+It's not so different from the classic approach; there are implicit check constraints, and most of its limitations are still relevant.
+
 Despite the flexibility, this approach forces the planner to perform an exhaustive search and to check constraints on each partition to determine whether it should be present in the plan or not. A large number of partitions may result in significant planning overhead.

 The `pg_pathman` module features partition management functions and an optimized planning mechanism which utilizes knowledge of the partitions' structure. It stores the partitioning configuration in the `pathman_config` table; each row contains a single entry for a partitioned table (relation name, partitioning column and its type). During the initialization stage, the `pg_pathman` module caches some information about child partitions in shared memory, which is used later for plan construction. Before a SELECT query is executed, `pg_pathman` traverses the condition tree in search of expressions like:

@@ -60,13 +70,6 @@ More interesting features are yet to come. Stay tuned!
 * FDW support (foreign partitions);
 * Various GUC toggles and configurable settings. 
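For instance, those GUC toggles can be flipped at runtime. A minimal sketch using the two settings exercised by the tests later in this series (`pg_pathman.enable` and `pg_pathman.override_copy`):

```plpgsql
-- disable pg_pathman's planning machinery for the current session
SET pg_pathman.enable = off;

-- let vanilla COPY handle partitioned tables instead of pg_pathman
SET pg_pathman.override_copy = off;

-- switch the optimizations back on
SET pg_pathman.enable = on;
```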
-## Roadmap - - * Multi-level partitioning (ver 1.5); - * Improved referential integrity + foreign keys on partitioned tables (ver 1.5); - -Take a look at [this page](https://github.com/postgrespro/pg_pathman/wiki/Roadmap); - ## Installation guide To install `pg_pathman`, execute this in the module's directory: ```shell From 62d04775377225805c110bf2428eeef66335c1c7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 15 Mar 2018 16:15:56 +0300 Subject: [PATCH 212/528] fix incorrect usage of memcpy() in start_bgworker() --- src/pathman_workers.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index d6d9a953..e393d313 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -185,17 +185,16 @@ start_bgworker(const char bgworker_name[BGW_MAXLEN], pid_t pid; /* Initialize worker struct */ - memcpy(worker.bgw_name, bgworker_name, BGW_MAXLEN); - memcpy(worker.bgw_function_name, bgworker_proc, BGW_MAXLEN); - memcpy(worker.bgw_library_name, "pg_pathman", BGW_MAXLEN); + memset(&worker, 0, sizeof(worker)); + + snprintf(worker.bgw_name, BGW_MAXLEN, "%s", bgworker_name); + snprintf(worker.bgw_function_name, BGW_MAXLEN, "%s", bgworker_proc); + snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; -#if PG_VERSION_NUM < 100000 - worker.bgw_main = NULL; -#endif worker.bgw_main_arg = bgw_arg; worker.bgw_notify_pid = MyProcPid; From d527f7032eaa9e86ca6141705341025539282ba0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 15 Mar 2018 18:00:22 +0300 Subject: [PATCH 213/528] backport tests from rel_future_beta --- tests/python/.gitignore | 1 + tests/python/partitioning_test.py | 2046 ++++++++++++++--------------- travis/pg-travis-test.sh | 2 +- 3 files changed, 978 insertions(+), 1071 deletions(-) create mode 100644 tests/python/.gitignore diff --git a/tests/python/.gitignore b/tests/python/.gitignore new file mode 100644 index 00000000..750ecf9f --- /dev/null +++ b/tests/python/.gitignore @@ -0,0 +1 @@ +tests.log diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0d05c458..2c290f8d 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,1087 +1,993 @@ #!/usr/bin/env python3 # coding: utf-8 - """ - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries +partitioning_test.py + Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2017, Postgres Professional """ -import unittest +import json import math -import time import os import re import subprocess import threading +import time +import unittest +import functools + +from distutils.version import LooseVersion +from testgres import get_new_node, get_pg_version +from testgres.utils import pg_version_ge + +# set setup base logging config, it can be turned on by `use_logging` +# parameter on node setup + +import logging +import logging.config + +logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') +LOG_CONFIG = { + 'version': 1, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + 'file': { + 'class': 'logging.FileHandler', + 'filename': logfile, + 'formatter': 
'base_format', + 'level': logging.DEBUG, + }, + }, + 'formatters': { + 'base_format': { + 'format': '%(node)-5s: %(message)s', + }, + }, + 'root': { + 'handlers': ('file', ), + 'level': 'DEBUG', + }, +} + +logging.config.dictConfig(LOG_CONFIG) +version = LooseVersion(get_pg_version()) -from testgres import get_new_node, stop_all, get_config - -version = get_config().get("VERSION_NUM") # Helper function for json equality -def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj - - -def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper - - -class PartitioningTests(unittest.TestCase): - - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) - - def printlog(self, logfile): - with open(logfile, 'r') as log: - for line in log.readlines(): - print(line) - - def test_concurrent(self): - """Tests concurrent partitioning""" - try: - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - try: - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize 
replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - self.printlog(replica.logs_dir + '/postgresql.log') - raise e - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they are doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done its work - flags = [Flag(False) for i in range(3)] - - # All threads synchronize though this lock - lock = threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - try: - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - 
self.assertEqual(flags[i].get(), False) - - # Commit transaction. Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - master.append_conf( - 'postgresql.conf', - 
'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - b'25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return - - # Prepare test database - 
node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel 
Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_merge_insert(self): - """Test concurrent merge_range_partitions() + INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') 
- con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations - - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") - - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 6: finish merge in con1 (success, unlock) - con1.commit() - - # Step 7: wait for con2 - t.join() - - rows = con1.execute("select *, tableoid::regclass::text from ins_test") - - # check number of rows in table - self.assertEqual(len(rows), 1) - - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) - - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_pg_dump(self): - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. 
- """ - - import subprocess - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute('insert into range_partitioned select i from generate_series(1, 500) i') - con.execute('create table hash_partitioned (i integer not null)') - con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') - con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') - - # fillin child tables with remain data - con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') - con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') - - # turn off enable_parent option - con.execute('select set_enable_parent(\'range_partitioned\', false)') - con.execute('select set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - def cmp_full(con1, con2): - """Compare selection partitions in plan and contents in partitioned tables""" - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', - 'only range_partitioned', - 'hash_partitioned', - 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [x[0] for x in con1.execute(content_query % table_ref)] - content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', 'alter system set pg_pathman.override_copy to off') - node.psql('copy', 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - 
"initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, - turnon_pathman, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--inserts", - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--format=custom", - "initial"], - [node.get_bin_path("pg_restore"), - "-p {}".format(node.port), - "--dbname=copy"], - cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, - stdout=FNULL, stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_concurrent_detach(self): - """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" - - # Init parameters - num_insert_workers = 8 - detach_timeout = 0.1 # time in sec between successive inserts and detachs - num_detachs = 100 # estimated number of detachs - inserts_advance = 1 # abvance in sec of inserts process under detachs - test_interval = int(math.ceil(detach_timeout * num_detachs)) - - 
insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" - detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" - - # Check pgbench scripts on existance - self.assertTrue(os.path.isfile(insert_pgbench_script), - msg="pgbench script with insert timestamp doesn't exist") - self.assertTrue(os.path.isfile(detach_pgbench_script), - msg="pgbench script with detach letfmost partition doesn't exist") - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute('create table ts_range_partitioned(ts timestamp not null)') - con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ - "-j", "%i" % num_insert_workers, - "-c", "%i" % num_insert_workers, - "-f", insert_pgbench_script, - "-T", "%i" % (test_interval+inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ - "-D", "timeout=%f" % detach_timeout, - "-f", detach_pgbench_script, - "-T", "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg="Race condition between detach and concurrent inserts with append partition is expired") - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() +def ordered(obj, skip_keys=None): + if isinstance(obj, dict): + return sorted((k, ordered(v, skip_keys=skip_keys)) for k, v in obj.items() + if skip_keys is None or (skip_keys and k not in skip_keys)) + if isinstance(obj, list): + return sorted(ordered(x, skip_keys=skip_keys) for x in obj) + else: + return obj + + +# Check if postgres_fdw is available +@functools.lru_cache(maxsize=1) +def is_postgres_fdw_ready(): + with get_new_node().init().start() as node: + result = node.execute(""" + select count(*) from pg_available_extensions where name = 'postgres_fdw' + """) + + if result[0][0] > 0: + return True + + return False + + +class Tests(unittest.TestCase): + def set_trace(self, con, command="pg_debug"): + pid = con.execute("select pg_backend_pid()")[0][0] + p = subprocess.Popen([command], stdin=subprocess.PIPE) + p.communicate(str(pid).encode()) + + def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): + node = get_new_node() + node.init(allow_streaming=allow_streaming) + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.psql('create extension pg_pathman') + + if test_data: + node.safe_psql(""" + create table abc(id serial, t text); + insert into abc select generate_series(1, 300000); + select create_hash_partitions('abc', 'id', 3, partition_data := false); + """) + + node.safe_psql('vacuum analyze') + + return node + + def test_concurrent(self): + """ Test concurrent partitioning """ + + with 
self.start_new_pathman_cluster(test_data=True) as node:
+            node.psql("select partition_table_concurrently('abc')")
+
+            while True:
+                # update some rows to check for deadlocks
+                node.safe_psql("""
+                    update abc set t = 'test'
+                    where id in (select (random() * 300000)::int
+                                 from generate_series(1, 3000))
+                    """)
+
+                count = node.execute("""
+                    select count(*) from pathman_concurrent_part_tasks
+                """)
+
+                # if there are no active workers, the work is done
+                if count[0][0] == 0:
+                    break
+                time.sleep(1)
+
+            data = node.execute('select count(*) from only abc')
+            self.assertEqual(data[0][0], 0)
+            data = node.execute('select count(*) from abc')
+            self.assertEqual(data[0][0], 300000)
+            node.stop()
+
+    def test_replication(self):
+        """ Test how pg_pathman works with replication """
+
+        with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node:
+            with node.replicate() as replica:
+                replica.start()
+                replica.catchup()
+
+                # check that results are equal
+                self.assertEqual(
+                    node.psql('explain (costs off) select * from abc'),
+                    replica.psql('explain (costs off) select * from abc'))
+
+                # enable parent and see if it is enabled in replica
+                node.psql("select enable_parent('abc')")
+
+                # wait until replica catches up
+                replica.catchup()
+
+                self.assertEqual(
+                    node.psql('explain (costs off) select * from abc'),
+                    replica.psql('explain (costs off) select * from abc'))
+                self.assertEqual(
+                    node.psql('select * from abc'),
+                    replica.psql('select * from abc'))
+                self.assertEqual(
+                    node.execute('select count(*) from abc')[0][0], 300000)
+
+                # check that UPDATE in pathman_config_params invalidates cache
+                node.psql('update pathman_config_params set enable_parent = false')
+
+                # wait until replica catches up
+                replica.catchup()
+
+                self.assertEqual(
+                    node.psql('explain (costs off) select * from abc'),
+                    replica.psql('explain (costs off) select * from abc'))
+                self.assertEqual(
+                    node.psql('select * from abc'),
+                    replica.psql('select * from abc'))
+                self.assertEqual(
+                    node.execute('select count(*) from abc')[0][0], 0)
+
+    def test_locks(self):
+        """
+        Test that a session trying to create new partitions
+        waits for other sessions if they are doing the same
+        """
+
+        class Flag:
+            def __init__(self, value):
+                self.flag = value
+
+            def set(self, value):
+                self.flag = value
+
+            def get(self):
+                return self.flag
+
+        # There is one flag per thread showing whether that thread has done its work
+        flags = [Flag(False) for i in range(3)]
+
+        # All threads synchronize through this lock
+        lock = threading.Lock()
+
+        # Define thread function
+        def add_partition(node, flag, query):
+            """
+            We expect that this query will wait until
+            another session commits or rolls back
+            """
+            node.safe_psql(query)
+            with lock:
+                flag.set(True)
+
+        # Initialize master server
+        with get_new_node() as node:
+            node.init()
+            node.append_conf("shared_preload_libraries='pg_pathman'")
+            node.start()
+
+            node.safe_psql("""
+                create extension pg_pathman;
+                create table abc(id serial, t text);
+                insert into abc select generate_series(1, 100000);
+                select create_range_partitions('abc', 'id', 1, 50000);
+            """)
+
+            # Start transaction that will create partition
+            with node.connect() as con:
+                con.begin()
+                con.execute("select append_range_partition('abc')")
+
+                # Start threads that are supposed to add new partitions
+                # and wait some time
+                query = (
+                    "select prepend_range_partition('abc')",
+                    "select append_range_partition('abc')",
+                    "select add_range_partition('abc', 500000, 550000)",
+                )
+
+                threads = []
+                for i in range(3):
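+                    # Each worker runs one of the three partition-creation
+                    # queries above; since con's transaction is still open
+                    # after append_range_partition(), every worker is
+                    # expected to block here until con commits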
+                    thread = threading.Thread(
+                        target=add_partition, args=(node, flags[i], query[i]))
+                    threads.append(thread)
+                    thread.start()
+                time.sleep(3)
+
+                # These threads should wait until the current transaction finishes
+                with lock:
+                    for i in range(3):
+                        self.assertEqual(flags[i].get(), False)
+
+                # Commit transaction. Since then other sessions can create
+                # partitions
+                con.commit()
+
+                # Now wait until each thread finishes
+                for thread in threads:
+                    thread.join()
+
+                # Check flags; they should be true, which means that the
+                # threads have finished
+                with lock:
+                    for i in range(3):
+                        self.assertEqual(flags[i].get(), True)
+
+                # Check that all partitions were created
+                self.assertEqual(
+                    node.safe_psql(
+                        "select count(*) from pg_inherits where inhparent='abc'::regclass"),
+                    b'6\n')
+
+    def test_tablespace(self):
+        """ Check tablespace support """
+
+        def check_tablespace(node, tablename, tablespace):
+            res = node.execute("select get_tablespace('{}')".format(tablename))
+            if len(res) == 0:
+                return False
+
+            return res[0][0] == tablespace
+
+        with get_new_node() as node:
+            node.init()
+            node.append_conf("shared_preload_libraries='pg_pathman'")
+            node.start()
+            node.psql('create extension pg_pathman')
+
+            # create tablespace
+            path = os.path.join(node.data_dir, 'test_space_location')
+            os.mkdir(path)
+            node.psql("create tablespace test_space location '{}'".format(path))
+
+            # create table in this tablespace
+            node.psql('create table abc(a serial, b int) tablespace test_space')
+
+            # create three partitions. Expect that they will be created in the
+            # same tablespace as the parent table
+            node.psql("select create_range_partitions('abc', 'a', 1, 10, 3)")
+            self.assertTrue(check_tablespace(node, 'abc', 'test_space'))
+
+            # check tablespace for appended partition
+            node.psql("select append_range_partition('abc', 'abc_appended')")
+            self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space'))
+
+            # check tablespace for prepended partition
+            node.psql("select prepend_range_partition('abc', 'abc_prepended')")
+            self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space'))
+
+            # check tablespace for added partition
+            node.psql("select add_range_partition('abc', 41, 51, 'abc_added')")
+            self.assertTrue(check_tablespace(node, 'abc_added', 'test_space'))
+
+            # check tablespace for split
+            node.psql("select split_range_partition('abc_added', 45, 'abc_splitted')")
+            self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space'))
+
+            # now let's specify tablespace explicitly
+            node.psql(
+                "select append_range_partition('abc', 'abc_appended_2', 'pg_default')"
+            )
+            node.psql(
+                "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')"
+            )
+            node.psql(
+                "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')"
+            )
+            node.psql(
+                "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')"
+            )
+
+            # yapf: disable
+            self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default'))
+            self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default'))
+            self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default'))
+            self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default'))
+
+    @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing')
+    def test_foreign_table(self):
+        """ Test foreign tables """
+
+        # Start master server
+        with get_new_node() as master, get_new_node() as fserv:
+            master.init()
+            master.append_conf("""
+                shared_preload_libraries='pg_pathman, postgres_fdw'\n
+            """)
+            master.start()
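+
+            # both extensions are required on the master: pg_pathman for
+            # partitioning, postgres_fdw for the foreign partitions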
+            master.psql('create extension pg_pathman')
+            master.psql('create extension postgres_fdw')
+
+            # RANGE partitioning test with FDW:
+            # - create range partitioned table in master
+            # - create foreign server
+            # - create foreign table and insert some data into it
+            # - attach foreign table to partitioned one
+            # - try inserting data into foreign partition via parent
+            # - drop partitions
+            master.psql("""
+                create table abc(id serial, name text);
+                select create_range_partitions('abc', 'id', 0, 10, 2)
+            """)
+
+            # Current user name (needed for user mapping)
+            username = master.execute('select current_user')[0][0]
+
+            fserv.init().start()
+            fserv.safe_psql("create table ftable(id serial, name text)")
+            fserv.safe_psql("insert into ftable values (25, 'foreign')")
+
+            # Create foreign table and attach it to partitioned table
+            master.safe_psql("""
+                create server fserv
+                foreign data wrapper postgres_fdw
+                options (dbname 'postgres', host '127.0.0.1', port '{}')
+            """.format(fserv.port))
+
+            master.safe_psql("""
+                create user mapping for {0} server fserv
+                options (user '{0}')
+            """.format(username))
+
+            master.safe_psql("""
+                import foreign schema public limit to (ftable)
+                from server fserv into public
+            """)
+
+            master.safe_psql(
+                "select attach_range_partition('abc', 'ftable', 20, 30)")
+
+            # Check that the table was attached to the partitioned table
+            self.assertEqual(
+                master.safe_psql('select * from ftable'),
+                b'25|foreign\n')
+
+            # Check that we can successfully insert new data into foreign partition
+            master.safe_psql("insert into abc values (26, 'part')")
+            self.assertEqual(
+                master.safe_psql('select * from ftable order by id'),
+                b'25|foreign\n26|part\n')
+
+            # Test dropping partitions (including foreign partitions)
+            master.safe_psql("select drop_partitions('abc')")
+
+            # HASH partitioning with FDW:
+            # - create hash partitioned table in master
+            # - create foreign table
+            # - replace local partition with foreign one
+            # - insert data
+            # - drop partitions
+            master.psql("""
+                create table hash_test(id serial, name text);
+                select create_hash_partitions('hash_test', 'id', 2)
+            """)
+            fserv.safe_psql('create table f_hash_test(id serial, name text)')
+
+            master.safe_psql("""
+                import foreign schema public limit to (f_hash_test)
+                from server fserv into public
+            """)
+            master.safe_psql("""
+                select replace_hash_partition('hash_test_1', 'f_hash_test')
+            """)
+            master.safe_psql('insert into hash_test select generate_series(1,10)')
+
+            self.assertEqual(
+                master.safe_psql('select * from hash_test'),
+                b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n')
+            master.safe_psql("select drop_partitions('hash_test')")
+
+    @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing')
+    def test_parallel_nodes(self):
+        """ Test parallel queries under partitions """
+
+        # Init and start postgres instance with pg_pathman preloaded
+        with get_new_node() as node:
+            node.init()
+            node.append_conf(
+                "shared_preload_libraries='pg_pathman, postgres_fdw'")
+            node.start()
+
+            # Check postgres server version;
+            # if version < 9.6, skip all tests for parallel queries
+            if version < LooseVersion('9.6.0'):
+                return
+
+            # Prepare test database
+            node.psql('create extension pg_pathman')
+            node.psql("""
+                create table range_partitioned as
+                select generate_series(1, 1e4::integer) i;
+
+                alter table range_partitioned alter column i set not null;
+                select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer);
+
+                create table hash_partitioned as
+                select generate_series(1, 1e4::integer) i;
+
+                alter table 
hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('vacuum analyze') + + node.psql(""" + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= LooseVersion('10'): + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from 
range_partitioned where i < 1' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Remove all objects for testing + node.psql('drop table range_partitioned cascade') + node.psql('drop table hash_partitioned cascade') + node.psql('drop extension pg_pathman cascade') + + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + """) # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as 
node:
+            # Create table 'ins_test' and partition it
+            with node.connect() as con0:
+                # yapf: disable
+                con0.begin()
+                con0.execute("create table ins_test(val int not null)")
+                con0.execute("insert into ins_test select generate_series(1, 50)")
+                con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)")
+                con0.commit()
+
+            # Create two separate connections for this test
+            with node.connect() as con1, node.connect() as con2:
+
+                # Thread for connection #2 (it has to wait)
+                def con2_thread():
+                    con2.execute('insert into ins_test values(51)')
+                    con2.commit()
+
+                # Step 1: lock partitioned table in con1
+                con1.begin()
+                con1.execute('select count(*) from ins_test')  # load pathman's cache
+                con1.execute('lock table ins_test in share update exclusive mode')
+
+                # Step 2: try inserting new value in con2 (waiting)
+                con2.begin()
+                con2.execute('select count(*) from ins_test')  # load pathman's cache
+                t = threading.Thread(target=con2_thread)
+                t.start()
+
+                # Step 3: wait until 't' locks
+                while True:
+                    with node.connect() as con0:
+                        locks = con0.execute("""
+                            select count(*) from pg_locks where granted = 'f'
+                        """)
+
+                        if int(locks[0][0]) > 0:
+                            break
+
+                # Step 4: try inserting new value in con1 (success, unlock)
+                con1.execute('insert into ins_test values(52)')
+                con1.commit()
+
+                # Step 5: wait for con2
+                t.join()
+
+                rows = con1.execute("""
+                    select * from pathman_partition_list
+                    where parent = 'ins_test'::regclass
+                    order by range_min, range_max
+                """)
+
+                # check number of partitions
+                self.assertEqual(len(rows), 6)
+
+                # check range_max of partitions
+                self.assertEqual(int(rows[0][5]), 11)
+                self.assertEqual(int(rows[1][5]), 21)
+                self.assertEqual(int(rows[2][5]), 31)
+                self.assertEqual(int(rows[3][5]), 41)
+                self.assertEqual(int(rows[4][5]), 51)
+                self.assertEqual(int(rows[5][5]), 61)
+
+    def test_conc_part_merge_insert(self):
+        """ Test concurrent merge_range_partitions() + INSERT """
+
+        # Create and start new instance
+        with self.start_new_pathman_cluster(allow_streaming=False) as node:
+            # Create table 'ins_test' and partition it
+            with node.connect() as con0:
+                # yapf: disable
+                con0.begin()
+                con0.execute("create table ins_test(val int not null)")
+                con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)")
+                con0.commit()
+
+            # Create two separate connections for this test
+            with node.connect() as con1, node.connect() as con2:
+
+                # Thread for connection #2 (it has to wait)
+                def con2_thread():
+                    con2.begin()
+                    con2.execute('insert into ins_test values(20)')
+                    con2.commit()
+
+                # Step 1: initialize con1
+                con1.begin()
+                con1.execute('select count(*) from ins_test')  # load pathman's cache
+
+                # Step 2: initialize con2
+                con2.begin()
+                con2.execute('select count(*) from ins_test')  # load pathman's cache
+                con2.commit()  # unlock relations
+
+                # Step 3: merge 'ins_test_1' + 'ins_test_2' in con1 (success)
+                con1.execute(
+                    "select merge_range_partitions('ins_test_1', 'ins_test_2')")
+
+                # Step 4: try inserting new value in con2 (waiting)
+                t = threading.Thread(target=con2_thread)
+                t.start()
+
+                # Step 5: wait until 't' locks
+                while True:
+                    with node.connect() as con0:
+                        locks = con0.execute("""
+                            select count(*) from pg_locks where granted = 'f'
+                        """)
+
+                        if int(locks[0][0]) > 0:
+                            break
+
+                # Step 6: finish merge in con1 (success, unlock)
+                con1.commit()
+
+                # Step 7: wait for con2
+                t.join()
+
+                rows = con1.execute("select *, tableoid::regclass::text from ins_test")
+
+                # check number of rows in table
+                self.assertEqual(len(rows), 1)
+
+                # check value that has been inserted
+                self.assertEqual(int(rows[0][0]), 20)
+
+                # check partition that was chosen for insert
+                self.assertEqual(str(rows[0][1]), 'ins_test_1')
+
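+    # pg_dump/pg_restore should preserve both the partitions' contents
+    # and pathman's metadata (pathman_partition_list) in the restored copy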
+    def test_pg_dump(self):
+        with self.start_new_pathman_cluster() as node:
+            node.safe_psql('create database copy')
+
+            node.safe_psql("""
+                create table test_hash(val int not null);
+                select create_hash_partitions('test_hash', 'val', 10);
+                insert into test_hash select generate_series(1, 90);
+
+                create table test_range(val int not null);
+                select create_range_partitions('test_range', 'val', 1, 10, 10);
+                insert into test_range select generate_series(1, 95);
+            """)
+
+            dump = node.dump()
+            node.restore(dbname='copy', filename=dump)
+            os.remove(dump)
+
+            # HASH
+            a = node.execute('postgres', 'select * from test_hash order by val')
+            b = node.execute('copy', 'select * from test_hash order by val')
+            self.assertEqual(a, b)
+            c = node.execute('postgres', 'select * from only test_hash order by val')
+            d = node.execute('copy', 'select * from only test_hash order by val')
+            self.assertEqual(c, d)
+
+            # RANGE
+            a = node.execute('postgres', 'select * from test_range order by val')
+            b = node.execute('copy', 'select * from test_range order by val')
+            self.assertEqual(a, b)
+            c = node.execute('postgres', 'select * from only test_range order by val')
+            d = node.execute('copy', 'select * from only test_range order by val')
+            self.assertEqual(c, d)
+
+            # check partition sets
+            p1 = node.execute('postgres', 'select * from pathman_partition_list')
+            p2 = node.execute('copy', 'select * from pathman_partition_list')
+            self.assertEqual(sorted(p1), sorted(p2))
+
+
+    def test_concurrent_detach(self):
+        """
+        Test concurrent partition detach with continuous
+        tuple inserting and spawning of new partitions
+        """
+
+        # Init parameters
+        num_insert_workers = 8
+        detach_timeout = 0.1    # time in sec between successive inserts and detaches
+        num_detachs = 100       # estimated number of detaches
+        inserts_advance = 1     # head start in sec of the inserts process over detaches
+        test_interval = int(math.ceil(detach_timeout * num_detachs))
+
+        insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \
+            + "/pgbench_scripts/insert_current_timestamp.pgbench"
+        detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \
+            + "/pgbench_scripts/detachs_in_timeout.pgbench"
+
+        # Check that the pgbench scripts exist
+        self.assertTrue(
+            os.path.isfile(insert_pgbench_script),
+            msg="pgbench script with insert timestamp doesn't exist")
+
+        self.assertTrue(
+            os.path.isfile(detach_pgbench_script),
+            msg="pgbench script with detach leftmost partition doesn't exist")
+
+        # Create and start new instance
+        with self.start_new_pathman_cluster(allow_streaming=False) as node:
+            # Create a partitioned table that spawns a new partition
+            # every *detach_timeout* seconds
+            with node.connect() as con0:
+                con0.begin()
+                con0.execute(
+                    'create table ts_range_partitioned(ts timestamp not null)')
+
+                # yapf: disable
+                con0.execute("""
+                    select create_range_partitions('ts_range_partitioned',
+                                                   'ts',
+                                                   current_timestamp,
+                                                   interval '%f',
+                                                   1)
+                """ % detach_timeout)
+                con0.commit()
+
+            # Run insert and detach processes in the background
+            with open(os.devnull, 'w') as fnull:
+                # init pgbench's utility tables
+                init_pgbench = node.pgbench(stdout=fnull, stderr=fnull, options=["-i"])
+                init_pgbench.wait()
+
+                inserts = node.pgbench(
+                    stdout=fnull,
+                    stderr=subprocess.PIPE,
+                    options=[
+                        "-j", "%i" % num_insert_workers,
+                        "-c", "%i" % num_insert_workers,
+                        "-f", insert_pgbench_script,
+                        "-T", "%i" % (test_interval + inserts_advance)
+                    ])
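+                # give the inserts a head start of 'inserts_advance' seconds
+                # so that detaches always race against in-flight inserts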
"-T", + "%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=fnull, + stderr=fnull, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) if __name__ == "__main__": - unittest.main() - + unittest.main() diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index be0e645e..97fa5ea9 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -7,7 +7,7 @@ sudo apt-get update # required packages apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev build-essential" -pip_packages="testgres==0.4.0" +pip_packages="testgres" # exit code status=0 From 49c6f70e8463b0d7d086c47cd7f286879a1d2378 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 15 Mar 2018 18:55:05 +0300 Subject: [PATCH 214/528] bump lib version to 1.4.10 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 4922b21a..9bcf29d5 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.9", + "version": "1.4.10", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.9", + "version": "1.4.10", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 121880ea..0e0cbed7 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10409 + 10410 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 4ce40e4f..5de01b32 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010409 +#define CURRENT_LIB_VERSION 0x010410 void *pathman_cache_search_relid(HTAB *cache_table, From 001166ae918bae169def31e66b8c2ed21e76ac4e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 26 Mar 2018 15:04:10 +0300 Subject: [PATCH 215/528] some notes regarding partition creation callback (thanks to @thamerlan) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 99d2cc9f..b4a8be50 100644 --- a/README.md +++ b/README.md @@ -272,7 +272,7 @@ Enable/disable auto partition propagation (only for RANGE partitioning). It is e ```plpgsql set_init_callback(relation REGCLASS, callback REGPROC DEFAULT 0) ``` -Set partition creation callback to be invoked for each attached or created partition (both HASH and RANGE). The callback must have the following signature: `part_init_callback(args JSONB) RETURNS VOID`. 
Parameter `arg` consists of several fields whose presence depends on partitioning type: +Set partition creation callback to be invoked for each attached or created partition (both HASH and RANGE). If callback is marked with SECURITY INVOKER, it's executed with the privileges of the user that produced a statement which has led to creation of a new partition (e.g. `INSERT INTO partitioned_table VALUES (-5)`). The callback must have the following signature: `part_init_callback(args JSONB) RETURNS VOID`. Parameter `arg` consists of several fields whose presence depends on partitioning type: ```json /* RANGE-partitioned table abc (child abc_4) */ { From 6d154fd28938efda2cec68743b18e23b86d764c4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Apr 2018 15:57:16 +0300 Subject: [PATCH 216/528] fix partition creation for tables with dropped columns --- Makefile | 1 + expected/pathman_dropped_cols.out | 84 +++++++++++++++++++++++++++++++ sql/pathman_dropped_cols.sql | 43 ++++++++++++++++ src/include/utils.h | 2 +- src/init.c | 8 --- src/relation_info.c | 2 +- src/utils.c | 25 ++++++--- 7 files changed, 148 insertions(+), 17 deletions(-) create mode 100644 expected/pathman_dropped_cols.out create mode 100644 sql/pathman_dropped_cols.sql diff --git a/Makefile b/Makefile index 79f674ec..392f8e5d 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,7 @@ REGRESS = pathman_array_qual \ pathman_column_type \ pathman_cte \ pathman_domains \ + pathman_dropped_cols \ pathman_expressions \ pathman_foreign_keys \ pathman_gaps \ diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out new file mode 100644 index 00000000..89585b52 --- /dev/null +++ b/expected/pathman_dropped_cols.out @@ -0,0 +1,84 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; +/* + * we should be able to manage tables with dropped columns + */ +create table test_range(a int, b int, key int not null); +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + prepend_range_partition +------------------------- + test_range_3 +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max +------------+--------------+----------+------+-----------+----------- + test_range | test_range_1 | 2 | key | 1 | 11 + test_range | test_range_2 | 2 | key | 11 | 21 + test_range | test_range_3 | 2 | key | -9 | 1 +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; + pg_get_constraintdef +------------------------------- + CHECK (key >= 1 AND key < 11) +(1 row) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_3_check'; + pg_get_constraintdef +------------------------------------------ + CHECK (key >= '-9'::integer AND key < 1) +(1 row) + +drop table test_range cascade; +NOTICE: drop cascades to 4 other objects +create table test_hash(a int, b int, key int not null); +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + 
replace_hash_partition +------------------------ + test_dummy +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max +-----------+-------------+----------+------+-----------+----------- + test_hash | test_hash_0 | 1 | key | | + test_hash | test_hash_1 | 1 | key | | + test_hash | test_dummy | 1 | key | | +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 1) +(1 row) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 2) +(1 row) + +drop table test_hash cascade; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA dropped_cols CASCADE; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql new file mode 100644 index 00000000..32589c8c --- /dev/null +++ b/sql/pathman_dropped_cols.sql @@ -0,0 +1,43 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; + + +/* + * we should be able to manage tables with dropped columns + */ + +create table test_range(a int, b int, key int not null); + +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_3_check'; + +drop table test_range cascade; + + +create table test_hash(a int, b int, key int not null); + +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 'key', 3); + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; +drop table test_hash cascade; + + +DROP SCHEMA dropped_cols CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/include/utils.h b/src/include/utils.h index 16100df7..42a1b814 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -24,7 +24,7 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool check_security_policy_internal(Oid relid, Oid role); -bool match_expr_to_operand(Node *expr, Node *operand); +bool match_expr_to_operand(const Node *expr, const Node *operand); /* * Misc. 
diff --git a/src/init.c b/src/init.c index 80ba4f0a..93b95839 100644 --- a/src/init.c +++ b/src/init.c @@ -1165,7 +1165,6 @@ validate_hash_constraint(const Expr *expr, Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(EXPRESSION) */ Node *second = lsecond(get_hash_expr->args); /* arg #2: PARTITIONS_COUNT */ Const *cur_partition_idx; /* hash value for this partition */ - Node *hash_arg; if (!IsA(first, FuncExpr) || !IsA(second, Const)) return false; @@ -1180,13 +1179,6 @@ validate_hash_constraint(const Expr *expr, if (list_length(type_hash_proc_expr->args) != 1) return false; - /* Extract arg of TYPE_HASH_PROC() */ - hash_arg = (Node *) linitial(type_hash_proc_expr->args); - - /* Check arg of TYPE_HASH_PROC() */ - if (!match_expr_to_operand(prel->expr, hash_arg)) - return false; - /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ if (DatumGetUInt32(((Const *) second)->constvalue) != PrelChildrenCount(prel)) return false; diff --git a/src/relation_info.c b/src/relation_info.c index b46c62ee..cb9c8bab 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -976,7 +976,7 @@ delay_invalidation_vague_rel(Oid vague_rel) /* Finish all pending invalidation jobs if possible */ void finish_delayed_invalidation(void) -{ +{ /* Exit early if there's nothing to do */ if (delayed_invalidation_whole_cache == false && delayed_invalidation_parent_rels == NIL && diff --git a/src/utils.c b/src/utils.c index 6f9e53cd..bd60d57d 100644 --- a/src/utils.c +++ b/src/utils.c @@ -37,6 +37,21 @@ #include "utils/regproc.h" #endif +static const Node * +drop_irrelevant_expr_wrappers(const Node *expr) +{ + switch (nodeTag(expr)) + { + /* Strip relabeling */ + case T_RelabelType: + return (const Node *) ((const RelabelType *) expr)->arg; + + /* no special actions required */ + default: + return expr; + } +} + static bool clause_contains_params_walker(Node *node, void *context) { @@ -110,14 +125,10 @@ check_security_policy_internal(Oid relid, Oid role) /* Compare clause operand with expression */ bool -match_expr_to_operand(Node *expr, Node *operand) +match_expr_to_operand(const Node *expr, const Node *operand) { - /* Strip relabeling for both operand and expr */ - if (operand && IsA(operand, RelabelType)) - operand = (Node *) ((RelabelType *) operand)->arg; - - if (expr && IsA(expr, RelabelType)) - expr = (Node *) ((RelabelType *) expr)->arg; + expr = drop_irrelevant_expr_wrappers(expr); + operand = drop_irrelevant_expr_wrappers(operand); /* compare expressions and return result right away */ return equal(expr, operand); From 2ea90b3a3cdc4c36757fb6254735a35940943813 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Apr 2018 16:52:35 +0300 Subject: [PATCH 217/528] prohibit add_to_pathman_config() on partitions --- expected/pathman_basic.out | 12 ++++++++++++ sql/pathman_basic.sql | 6 ++++++ src/pl_funcs.c | 10 ++++++++++ 3 files changed, 28 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fa946d72..15cd31b7 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1851,6 +1851,18 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +/* Check that multilivel is prohibited */ +CREATE TABLE test.multi(key int NOT NULL); +SELECT create_hash_partitions('test.multi', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('test.multi_1', 'key', 3); +ERROR: multilevel partitioning is not supported +DROP TABLE 
test.multi CASCADE; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index f24716c0..a9d37f18 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -551,6 +551,12 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; +/* Check that multilivel is prohibited */ +CREATE TABLE test.multi(key int NOT NULL); +SELECT create_hash_partitions('test.multi', 'key', 3); +SELECT create_hash_partitions('test.multi_1', 'key', 3); +DROP TABLE test.multi CASCADE; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 175d36de..53d4259c 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -765,6 +765,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Datum expr_datum; PathmanInitState init_state; + PartParentSearch parent_search; if (!PG_ARGISNULL(0)) { @@ -798,6 +799,15 @@ add_to_pathman_config(PG_FUNCTION_ARGS) get_rel_name_or_relid(relid)))); } + /* Check if it's a partition */ + if (get_parent_of_partition(relid, &parent_search) && + parent_search == PPS_ENTRY_PART_PARENT) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("multilevel partitioning is not supported"))); + } + /* Select partitioning type */ switch (PG_NARGS()) { From 88491153b8718e096c80b21cdaea277b3c4809c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Apr 2018 17:01:42 +0300 Subject: [PATCH 218/528] attempt to fix issue #153 (table is being partitioned now) --- src/init.c | 13 +------------ src/xact_handling.c | 4 ++-- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/src/init.c b/src/init.c index 93b95839..d8fb4c57 100644 --- a/src/init.c +++ b/src/init.c @@ -674,18 +674,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Set xmin if necessary */ if (xmin) - { - Datum value; - bool isnull; - - value = heap_getsysattr(htup, - MinTransactionIdAttributeNumber, - RelationGetDescr(rel), - &isnull); - - Assert(!isnull); - *xmin = DatumGetTransactionId(value); - } + *xmin = HeapTupleHeaderGetXmin(htup->t_data); /* Set ItemPointer if necessary */ if (iptr) diff --git a/src/xact_handling.c b/src/xact_handling.c index c6696cce..a63decce 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -162,8 +162,8 @@ xact_is_alter_pathman_stmt(Node *stmt) bool xact_object_is_visible(TransactionId obj_xmin) { - return TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()) || - TransactionIdEquals(obj_xmin, FrozenTransactionId); + return TransactionIdEquals(obj_xmin, FrozenTransactionId) || + TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()); } /* From e83044ebc82215d633ee457bdd9ab873a7c9ec07 Mon Sep 17 00:00:00 2001 From: "i.kartyshov" Date: Thu, 12 Apr 2018 20:23:27 +0300 Subject: [PATCH 219/528] Fix test pathman_join_clause set search_path=public --- expected/pathman_join_clause.out | 1 + sql/pathman_join_clause.sql | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index 7d9acdea..25d5cba9 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index 90287201..c578d361 100644 --- 
a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; From df9219c00ad4534ddd4b3d068ee38e83bdf33815 Mon Sep 17 00:00:00 2001 From: "i.kartyshov" Date: Thu, 12 Apr 2018 20:39:05 +0300 Subject: [PATCH 220/528] Fix tests set search_path=public --- expected/pathman_calamity.out | 1 + expected/pathman_callbacks.out | 1 + expected/pathman_domains.out | 1 + expected/pathman_foreign_keys.out | 1 + expected/pathman_interval.out | 1 + expected/pathman_permissions.out | 1 + expected/pathman_rowmarks.out | 1 + expected/pathman_rowmarks_1.out | 1 + expected/pathman_runtime_nodes.out | 1 + expected/pathman_utility_stmt.out | 1 + sql/pathman_calamity.sql | 2 +- sql/pathman_callbacks.sql | 2 +- sql/pathman_domains.sql | 1 + sql/pathman_foreign_keys.sql | 1 + sql/pathman_interval.sql | 1 + sql/pathman_permissions.sql | 1 + sql/pathman_rowmarks.sql | 2 +- sql/pathman_runtime_nodes.sql | 2 +- sql/pathman_utility_stmt.sql | 2 +- 19 files changed, 19 insertions(+), 5 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 0e0cbed7..3e87884c 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA calamity; /* call for coverage test */ diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index aaa9f82b..3eea2049 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; /* callback #1 */ diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index f78a73dc..e5e882c0 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA domains; CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 00462c3d..2ff12279 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA fkeys; /* Check primary keys generation */ diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 1bcd8216..72dc4e01 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_interval; /* Range partitions for INT2 type */ diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 4700f8bf..e329a9ec 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; CREATE ROLE user1 LOGIN; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 4c399e85..0bf1078a 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -3,6 +3,7 @@ * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- */ +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; CREATE TABLE 
rowmarks.first(id int NOT NULL); diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index 28d3f27d..d072cde9 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -3,6 +3,7 @@ * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- */ +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; CREATE TABLE rowmarks.first(id int NOT NULL); diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index d49343b9..f364cfb4 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 37149f1e..4cc4d493 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; /* * Test COPY diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index ed1b7b82..ad29a705 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA calamity; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index f435e1c7..65b729d9 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql index f6ee7076..4793c6f8 100644 --- a/sql/pathman_domains.sql +++ b/sql/pathman_domains.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA domains; diff --git a/sql/pathman_foreign_keys.sql b/sql/pathman_foreign_keys.sql index 392b3a7a..1ec1b766 100644 --- a/sql/pathman_foreign_keys.sql +++ b/sql/pathman_foreign_keys.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA fkeys; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index 59393ca4..f2933ab0 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_interval; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 43bf6ca6..2dd22fc0 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index 9864b8b9..aa365544 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -3,7 +3,7 @@ * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- */ - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index b54c7571..e0b50e9b 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; diff --git a/sql/pathman_utility_stmt.sql 
b/sql/pathman_utility_stmt.sql index c7d25051..31232ce1 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; From 7a561261b631a391156d1a074a6007d9e51ea128 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 23 Apr 2018 22:49:12 +0300 Subject: [PATCH 221/528] fix typos using codespell --- src/hooks.c | 2 +- src/include/partition_filter.h | 2 +- src/include/relation_info.h | 2 +- src/partition_creation.c | 4 ++-- src/partition_filter.c | 4 ++-- src/pathman_workers.c | 2 +- src/planner_tree_modification.c | 2 +- src/relation_info.c | 2 +- src/utils.c | 4 ++-- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index ebd35b61..adcb805b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -600,7 +600,7 @@ pathman_enable_assign_hook(bool newval, void *extra) /* * Planner hook. It disables inheritance for tables that have been partitioned - * by pathman to prevent standart PostgreSQL partitioning mechanism from + * by pathman to prevent standard PostgreSQL partitioning mechanism from * handling that tables. */ PlannedStmt * diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 0cd08c36..fdd14045 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -69,7 +69,7 @@ struct ResultPartsStorage EState *estate; /* pointer to executor's state */ - CmdType command_type; /* currenly we only allow INSERT */ + CmdType command_type; /* currently we only allow INSERT */ LOCKMODE head_open_lock_mode; LOCKMODE heap_close_lock_mode; }; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index c4bc3a05..99eddc22 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -161,7 +161,7 @@ typedef struct int ev_align; /* alignment of the expression val's type */ Oid ev_collid; /* collation of the expression val */ - Oid cmp_proc, /* comparison fuction for 'ev_type' */ + Oid cmp_proc, /* comparison function for 'ev_type' */ hash_proc; /* hash function for 'ev_type' */ MemoryContext mcxt; /* memory context holding this struct */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 20094a4f..b5acfb28 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -647,7 +647,7 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) (uint64) DatumGetInt64(part_num)); /* can't use UInt64 on 9.5 */ /* - * If we found a unique name or attemps number exceeds some reasonable + * If we found a unique name or attempts number exceeds some reasonable * value then we quit * * XXX Should we throw an exception if max attempts number is reached? 
@@ -1231,7 +1231,7 @@ build_raw_range_check_tree(Node *raw_expression, and_oper->args = lappend(and_oper->args, left_arg); } - /* Right comparision (VAR < end_value) */ + /* Right comparison (VAR < end_value) */ if (!IsInfinite(end_value)) { /* Build right boundary */ diff --git a/src/partition_filter.c b/src/partition_filter.c index a1886c4d..66d19d34 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -160,7 +160,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; - /* Currenly ResultPartsStorage is used only for INSERTs */ + /* Currently ResultPartsStorage is used only for INSERTs */ parts_storage->command_type = CMD_INSERT; parts_storage->speculative_inserts = speculative_inserts; @@ -484,7 +484,7 @@ make_partition_filter(Plan *subplan, CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; - /* Currenly we don't support ON CONFLICT clauses */ + /* Currently we don't support ON CONFLICT clauses */ if (conflict_action != ONCONFLICT_NONE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), diff --git a/src/pathman_workers.c b/src/pathman_workers.c index e393d313..a3114ec7 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -599,7 +599,7 @@ bgw_main_concurrent_part(Datum main_arg) error = CopyErrorData(); FlushErrorState(); - /* Print messsage for this BGWorker to server log */ + /* Print message for this BGWorker to server log */ ereport(LOG, (errmsg("%s: %s", concurrent_part_bgw, error->message), errdetail("attempt: %d/%d, sleep time: %.2f", diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 0df4fc22..d4c2ee25 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -89,7 +89,7 @@ typedef struct /* SubLink that might contain an examined query */ SubLink *parent_sublink; - /* CommonTableExpr that might containt an examined query */ + /* CommonTableExpr that might contain an examined query */ CommonTableExpr *parent_cte; } transform_query_cxt; diff --git a/src/relation_info.c b/src/relation_info.c index cb9c8bab..1d191f1a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -218,7 +218,7 @@ refresh_pathman_relation_info(Oid relid, prel->ev_typmod = exprTypmod(prel->expr); prel->ev_collid = exprCollation(prel->expr); - /* Fetch HASH & CMP fuctions and other stuff from type cache */ + /* Fetch HASH & CMP functions and other stuff from type cache */ typcache = lookup_type_cache(prel->ev_type, TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); diff --git a/src/utils.c b/src/utils.c index bd60d57d..6f18b770 100644 --- a/src/utils.c +++ b/src/utils.c @@ -377,7 +377,7 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) if (IsBinaryCoercible(in_type, out_type)) return value; - /* If not, try to perfrom a type cast */ + /* If not, try to perform a type cast */ ret = find_coercion_pathway(out_type, in_type, COERCION_EXPLICIT, &castfunc); @@ -422,7 +422,7 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) } /* - * Convert interval from TEXT to binary form using partitioninig expresssion type. + * Convert interval from TEXT to binary form using partitioninig expression type. 
*/ Datum extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ From 0c8cd7f5078b062240ebd2a6dfba082ddb2de7d6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 14:50:42 +0300 Subject: [PATCH 222/528] attempt to fix duplicate rows (issue #155) --- src/hooks.c | 21 +++++++++------------ src/planner_tree_modification.c | 7 ++----- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index adcb805b..2a045a43 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -369,6 +369,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, * FROM test.tmp2 t2 * WHERE id = t.id); * + * or unions, multilevel partitioning, etc. + * * Since we disable optimizations on 9.5, we * have to skip parent table that has already * been expanded by standard inheritance. @@ -378,23 +380,18 @@ pathman_rel_pathlist_hook(PlannerInfo *root, foreach (lc, root->append_rel_list) { AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); - RangeTblEntry *cur_parent_rte, - *cur_child_rte; - - /* This 'appinfo' is not for this child */ - if (appinfo->child_relid != rti) - continue; - - cur_parent_rte = root->simple_rte_array[appinfo->parent_relid]; - cur_child_rte = rte; /* we already have it, saves time */ - /* This child == its own parent table! */ - if (cur_parent_rte->relid == cur_child_rte->relid) + /* + * If there's an 'appinfo', it means that somebody + * (PG?) has already processed this partitioned table + * and added its children to the plan. + */ + if (appinfo->child_relid == rti) return; } } - /* Make copy of partitioning expression and fix Var's varno attributes */ + /* Make copy of partitioning expression and fix Var's varno attributes */ part_expr = PrelExpressionForRelid(prel, rti); /* Get partitioning-related clauses (do this before append_child_relation()) */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d4c2ee25..3225e59e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -343,10 +343,7 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) /* Proceed if table is partitioned by pg_pathman */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { - /* - * HACK: unset the 'inh' flag to disable standard - * planning. We'll set it again later. 
- */ + /* HACK: unset the 'inh' flag to disable standard planning */ rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ @@ -569,7 +566,7 @@ partition_filter_visitor(Plan *plan, void *context) void assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status) -{ +{ Assert(rte->rtekind != RTE_CTE); /* HACK: set relevant bits in RTE */ From f0bd3675ee1546b9ff1193f9962e1ef6d11af423 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 16:16:25 +0300 Subject: [PATCH 223/528] small refactorings in test suite --- Makefile | 1 + expected/pathman_basic.out | 12 --------- expected/pathman_multilevel.out | 44 +++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 6 ----- sql/pathman_multilevel.sql | 30 ++++++++++++++++++++++ 5 files changed, 75 insertions(+), 18 deletions(-) create mode 100644 expected/pathman_multilevel.out create mode 100644 sql/pathman_multilevel.sql diff --git a/Makefile b/Makefile index 392f8e5d..d810185c 100644 --- a/Makefile +++ b/Makefile @@ -46,6 +46,7 @@ REGRESS = pathman_array_qual \ pathman_join_clause \ pathman_lateral \ pathman_mergejoin \ + pathman_multilevel \ pathman_only \ pathman_param_upd_del \ pathman_permissions \ diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 15cd31b7..fa946d72 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1851,18 +1851,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects -/* Check that multilivel is prohibited */ -CREATE TABLE test.multi(key int NOT NULL); -SELECT create_hash_partitions('test.multi', 'key', 3); - create_hash_partitions ------------------------- - 3 -(1 row) - -SELECT create_hash_partitions('test.multi_1', 'key', 3); -ERROR: multilevel partitioning is not supported -DROP TABLE test.multi CASCADE; -NOTICE: drop cascades to 3 other objects DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_multilevel.out b/expected/pathman_multilevel.out new file mode 100644 index 00000000..062f60a5 --- /dev/null +++ b/expected/pathman_multilevel.out @@ -0,0 +1,44 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA multi; +/* Check that multilevel is prohibited */ +CREATE TABLE multi.test(key int NOT NULL); +SELECT create_hash_partitions('multi.test', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('multi.test_1', 'key', 3); +ERROR: multilevel partitioning is not supported +DROP TABLE multi.test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Attach partitioned subtree to 'abc' */ +CREATE TABLE multi.abc (val int NOT NULL); +CREATE TABLE multi.def (LIKE multi.abc); +SELECT create_hash_partitions('multi.def', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +ALTER TABLE multi.def INHERIT multi.abc; +/* + * Although multilevel partitioning is not supported, + * we must make sure that pg_pathman won't add + * duplicate relations to the final plan. 
+ */ +EXPLAIN (COSTS OFF) TABLE multi.abc; + QUERY PLAN +------------------------- + Append + -> Seq Scan on abc + -> Seq Scan on def + -> Seq Scan on def_0 + -> Seq Scan on def_1 +(5 rows) + +DROP SCHEMA multi CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a9d37f18..f24716c0 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -551,12 +551,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; -/* Check that multilivel is prohibited */ -CREATE TABLE test.multi(key int NOT NULL); -SELECT create_hash_partitions('test.multi', 'key', 3); -SELECT create_hash_partitions('test.multi_1', 'key', 3); -DROP TABLE test.multi CASCADE; - DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_multilevel.sql b/sql/pathman_multilevel.sql new file mode 100644 index 00000000..1e211647 --- /dev/null +++ b/sql/pathman_multilevel.sql @@ -0,0 +1,30 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA multi; + + +/* Check that multilevel is prohibited */ +CREATE TABLE multi.test(key int NOT NULL); +SELECT create_hash_partitions('multi.test', 'key', 3); +SELECT create_hash_partitions('multi.test_1', 'key', 3); +DROP TABLE multi.test CASCADE; + + +/* Attach partitioned subtree to 'abc' */ +CREATE TABLE multi.abc (val int NOT NULL); +CREATE TABLE multi.def (LIKE multi.abc); +SELECT create_hash_partitions('multi.def', 'val', 2); +ALTER TABLE multi.def INHERIT multi.abc; + +/* + * Although multilevel partitioning is not supported, + * we must make sure that pg_pathman won't add + * duplicate relations to the final plan. + */ +EXPLAIN (COSTS OFF) TABLE multi.abc; + + +DROP SCHEMA multi CASCADE; +DROP EXTENSION pg_pathman; From 1c1dfffd2875855ccf4869bae098943269798064 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 16:36:57 +0300 Subject: [PATCH 224/528] make output of function get_pathman_lib_version() more user-friendly --- expected/pathman_calamity.out | 2 +- src/pl_funcs.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 3e87884c..7ac7da61 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10410 + 1.4.10 (1 row) set client_min_messages = NOTICE; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 53d4259c..00b26b44 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1606,5 +1606,9 @@ debug_capture(PG_FUNCTION_ARGS) Datum get_pathman_lib_version(PG_FUNCTION_ARGS) { - PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); + uint8 ver_major = (CURRENT_LIB_VERSION & 0xFF0000) >> 16, + ver_minor = (CURRENT_LIB_VERSION & 0xFF00) >> 8, + ver_patch = (CURRENT_LIB_VERSION & 0xFF); + + PG_RETURN_CSTRING(psprintf("%x.%x.%x", ver_major, ver_minor, ver_patch)); } From 601f53ae4d57216cc0650b3fbcb450e38bec6f01 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 16:50:47 +0300 Subject: [PATCH 225/528] bump lib version to 1.4.11 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 9bcf29d5..8d62708b 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The 
`pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.10", + "version": "1.4.11", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.10", + "version": "1.4.11", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7ac7da61..f9c63043 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.10 + 1.4.11 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 5de01b32..2fb82a91 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010410 +#define CURRENT_LIB_VERSION 0x010411 void *pathman_cache_search_relid(HTAB *cache_table, From cde3475de6df7201da6dfa2f4c976e5cbc8354b7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 22:09:54 +0300 Subject: [PATCH 226/528] small changes to issue template --- .github/ISSUE_TEMPLATE.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 5ad2562c..b1e98a96 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -22,7 +22,5 @@ Explain your problem here (it's always better to provide reproduction steps) ... - - - + From 60e13be3238036c6f4cce8822df499afc64e0e27 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 28 Apr 2018 15:48:59 +0300 Subject: [PATCH 227/528] compatibility fixes for 10.4, 9.6.9 and 9.5.13 --- src/hooks.c | 6 ++++-- src/include/compat/pg_compat.h | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2a045a43..fb299371 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -165,8 +165,10 @@ pathman_join_pathlist_hook(PlannerInfo *root, /* Extract join clauses which will separate partitions */ if (IS_OUTER_JOIN(extra->sjinfo->jointype)) { - extract_actual_join_clauses(extra->restrictlist, - &joinclauses, &otherclauses); + extract_actual_join_clauses_compat(extra->restrictlist, + joinrel->relids, + &joinclauses, + &otherclauses); } else { diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 22a3d5ff..61d1ab1f 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -318,6 +318,31 @@ static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RE #endif +/* + * extract_actual_join_clauses() + */ +#if (PG_VERSION_NUM >= 100004) || \ + (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ + (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinrelids), \ + (joinquals), \ + (otherquals)) +#else +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinquals), \ + (otherquals)) +#endif + + /* * get_all_actual_clauses() */ From 370f41f08d15216d294abcacc28ab31281e5a450 Mon Sep 17 00:00:00 2001 From: Dmitry 
Ivanov Date: Mon, 14 May 2018 15:09:43 +0300 Subject: [PATCH 228/528] bump lib version to 1.4.12 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 8d62708b..0cfa8dc2 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.11", + "version": "1.4.12", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.11", + "version": "1.4.12", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index f9c63043..0b2434d4 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.11 + 1.4.12 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 2fb82a91..8069f192 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010411 +#define CURRENT_LIB_VERSION 0x010412 void *pathman_cache_search_relid(HTAB *cache_table, From 6ca926c81b68112e34c86b0607ae1313c4e64ab0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 16 May 2018 17:37:13 +0300 Subject: [PATCH 229/528] add pathman_ prefix to common hooks --- src/hooks.c | 42 +++++++++++++++++++++--------------------- src/include/hooks.h | 14 +++++++------- src/pg_pathman.c | 24 ++++++++++++------------ 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index fb299371..96efad08 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -62,12 +62,12 @@ allow_star_schema_join(PlannerInfo *root, } -set_join_pathlist_hook_type set_join_pathlist_next = NULL; -set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; -planner_hook_type planner_hook_next = NULL; -post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; -shmem_startup_hook_type shmem_startup_hook_next = NULL; -ProcessUtility_hook_type process_utility_hook_next = NULL; +set_join_pathlist_hook_type pathman_set_join_pathlist_next = NULL; +set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next = NULL; +planner_hook_type pathman_planner_hook_next = NULL; +post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next = NULL; +shmem_startup_hook_type pathman_shmem_startup_hook_next = NULL; +ProcessUtility_hook_type pathman_process_utility_hook_next = NULL; /* Take care of joins */ @@ -91,9 +91,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, ListCell *lc; /* Call hooks set by other extensions */ - if (set_join_pathlist_next) - set_join_pathlist_next(root, joinrel, outerrel, - innerrel, jointype, extra); + if (pathman_set_join_pathlist_next) + pathman_set_join_pathlist_next(root, joinrel, outerrel, + innerrel, jointype, extra); /* Check that both pg_pathman & RuntimeAppend nodes are enabled */ if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) @@ -312,8 +312,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, int irange_len; /* Invoke original hook if needed */ - if (set_rel_pathlist_hook_next != NULL) - 
set_rel_pathlist_hook_next(root, rel, rti, rte); + if (pathman_set_rel_pathlist_hook_next) + pathman_set_rel_pathlist_hook_next(root, rel, rti, rte); /* Make sure that pg_pathman is ready */ if (!IsPathmanReady()) @@ -631,8 +631,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) } /* Invoke original hook if needed */ - if (planner_hook_next) - result = planner_hook_next(parse, cursorOptions, boundParams); + if (pathman_planner_hook_next) + result = pathman_planner_hook_next(parse, cursorOptions, boundParams); else result = standard_planner(parse, cursorOptions, boundParams); @@ -671,11 +671,11 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) * any statement, including utility commands */ void -pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) +pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) { /* Invoke original hook if needed */ - if (post_parse_analyze_hook_next) - post_parse_analyze_hook_next(pstate, query); + if (pathman_post_parse_analyze_hook_next) + pathman_post_parse_analyze_hook_next(pstate, query); /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) @@ -735,7 +735,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) if (IsPathmanReady() && get_planner_calls_count() > 0) { /* Check that pg_pathman is the last extension loaded */ - if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) + if (post_parse_analyze_hook != pathman_post_parse_analyze_hook) { Oid save_userid; int save_sec_context; @@ -786,8 +786,8 @@ void pathman_shmem_startup_hook(void) { /* Invoke original hook if needed */ - if (shmem_startup_hook_next != NULL) - shmem_startup_hook_next(); + if (pathman_shmem_startup_hook_next) + pathman_shmem_startup_hook_next(); /* Allocate shared memory objects */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); @@ -942,8 +942,8 @@ pathman_process_utility_hook(Node *first_arg, } /* Finally call process_utility_hook_next or standard_ProcessUtility */ - call_process_utility_compat((process_utility_hook_next ? - process_utility_hook_next : + call_process_utility_compat((pathman_process_utility_hook_next ? 
+ pathman_process_utility_hook_next : standard_ProcessUtility), first_arg, queryString, context, params, queryEnv, diff --git a/src/include/hooks.h b/src/include/hooks.h index 6a312db3..3d25847a 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -20,12 +20,12 @@ #include "tcop/utility.h" -extern set_join_pathlist_hook_type set_join_pathlist_next; -extern set_rel_pathlist_hook_type set_rel_pathlist_hook_next; -extern planner_hook_type planner_hook_next; -extern post_parse_analyze_hook_type post_parse_analyze_hook_next; -extern shmem_startup_hook_type shmem_startup_hook_next; -extern ProcessUtility_hook_type process_utility_hook_next; +extern set_join_pathlist_hook_type pathman_set_join_pathlist_next; +extern set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next; +extern planner_hook_type pathman_planner_hook_next; +extern post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next; +extern shmem_startup_hook_type pathman_shmem_startup_hook_next; +extern ProcessUtility_hook_type pathman_process_utility_hook_next; void pathman_join_pathlist_hook(PlannerInfo *root, @@ -46,7 +46,7 @@ PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams); -void pathman_post_parse_analysis_hook(ParseState *pstate, +void pathman_post_parse_analyze_hook(ParseState *pstate, Query *query); void pathman_shmem_startup_hook(void); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 37a2d3f1..c4adef6e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -299,18 +299,18 @@ _PG_init(void) restore_pathman_init_state(&temp_init_state); /* Set basic hooks */ - set_rel_pathlist_hook_next = set_rel_pathlist_hook; - set_rel_pathlist_hook = pathman_rel_pathlist_hook; - set_join_pathlist_next = set_join_pathlist_hook; - set_join_pathlist_hook = pathman_join_pathlist_hook; - shmem_startup_hook_next = shmem_startup_hook; - shmem_startup_hook = pathman_shmem_startup_hook; - post_parse_analyze_hook_next = post_parse_analyze_hook; - post_parse_analyze_hook = pathman_post_parse_analysis_hook; - planner_hook_next = planner_hook; - planner_hook = pathman_planner_hook; - process_utility_hook_next = ProcessUtility_hook; - ProcessUtility_hook = pathman_process_utility_hook; + pathman_set_rel_pathlist_hook_next = set_rel_pathlist_hook; + set_rel_pathlist_hook = pathman_rel_pathlist_hook; + pathman_set_join_pathlist_next = set_join_pathlist_hook; + set_join_pathlist_hook = pathman_join_pathlist_hook; + pathman_shmem_startup_hook_next = shmem_startup_hook; + shmem_startup_hook = pathman_shmem_startup_hook; + pathman_post_parse_analyze_hook_next = post_parse_analyze_hook; + post_parse_analyze_hook = pathman_post_parse_analyze_hook; + pathman_planner_hook_next = planner_hook; + planner_hook = pathman_planner_hook; + pathman_process_utility_hook_next = ProcessUtility_hook; + ProcessUtility_hook = pathman_process_utility_hook; /* Initialize static data for all subsystems */ init_main_pathman_toggles(); From 1a882435fb98988e1c902be498e22ce426279c12 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 17 May 2018 11:05:12 +0300 Subject: [PATCH 230/528] Make README.md more clear on `make PG_CONFIG=...` --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b4a8be50..a96894d0 100644 --- a/README.md +++ b/README.md @@ -72,13 +72,19 @@ More interesting features are yet to come. Stay tuned! 
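PATCH 229 above renames pg_pathman's saved-hook variables to carry a `pathman_` prefix; the underlying idiom is unchanged: save whatever hook is currently installed, install your own, and have it delegate to the saved value (or the standard routine) so several extensions can stack on one hook. A generic sketch, assuming the three-argument `planner_hook` signature of the PostgreSQL versions targeted here (9.5-10); the `my_` names are hypothetical:

```c
#include "postgres.h"
#include "fmgr.h"
#include "nodes/params.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type my_planner_hook_next = NULL;

static PlannedStmt *
my_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams)
{
	/* ... do extension-specific work before planning ... */

	/* Delegate to the previous hook, falling back to the planner */
	if (my_planner_hook_next)
		return my_planner_hook_next(parse, cursorOptions, boundParams);

	return standard_planner(parse, cursorOptions, boundParams);
}

void
_PG_init(void)
{
	my_planner_hook_next = planner_hook;	/* may be another extension's hook */
	planner_hook = my_planner_hook;
}
```

Since these saved-hook variables are globals in a shared library, unprefixed names like `planner_hook_next` can collide with identically named globals in other extensions loaded into the same backend, which is presumably what motivated the rename.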
## Installation guide To install `pg_pathman`, execute this in the module's directory: + ```shell make install USE_PGXS=1 ``` + +> **Important:** Don't forget to set the `PG_CONFIG` variable (`make PG_CONFIG=...`) in case you want to test `pg_pathman` on a non-default or custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). + Modify the **`shared_preload_libraries`** parameter in `postgresql.conf` as following: + ``` shared_preload_libraries = 'pg_pathman' ``` + > **Important:** `pg_pathman` may cause conflicts with some other extensions that use the same hook functions. For example, `pg_pathman` uses `ProcessUtility_hook` to handle COPY queries for partitioned tables, which means it may interfere with `pg_stat_statements` from time to time. In this case, try listing libraries in certain order: `shared_preload_libraries = 'pg_stat_statements, pg_pathman'`. It is essential to restart the PostgreSQL instance. After that, execute the following query in psql: @@ -88,8 +94,6 @@ CREATE EXTENSION pg_pathman; Done! Now it's time to setup your partitioning schemes. -> **Important:** Don't forget to set the `PG_CONFIG` variable in case you want to test `pg_pathman` on a custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). - ## How to update In order to update pg_pathman: From 603f0fc683799ea1250ed622781e5cb2db490a9f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 9 Jun 2018 16:01:48 +0300 Subject: [PATCH 231/528] fix locking in merge_range_partitions_internal() --- src/pl_range_funcs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 5e3a7696..8c7bb9b1 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -698,7 +698,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) int j; /* Prevent modification of partitions */ - LockRelationOid(parts[0], AccessExclusiveLock); + LockRelationOid(parts[i], AccessExclusiveLock); /* Look for the specified partition */ for (j = 0; j < PrelChildrenCount(prel); j++) From 0ce9a65274bd6455c28e5fdaae0c376486b6d132 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 11 Jun 2018 00:22:55 +0300 Subject: [PATCH 232/528] don't forget to register snapshots! --- src/pl_range_funcs.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 8c7bb9b1..b5deff4a 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -30,6 +30,7 @@ #include "utils/numeric.h" #include "utils/ruleutils.h" #include "utils/syscache.h" +#include "utils/snapmgr.h" #if PG_VERSION_NUM >= 100000 #include "utils/regproc.h" @@ -682,6 +683,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) *last; FmgrInfo cmp_proc; int i; + Snapshot fresh_snapshot; prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); @@ -739,6 +741,13 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "could not connect using SPI"); + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. 
+ */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); + /* Migrate the data from all partition to the first one */ for (i = 1; i < nparts; i++) { @@ -749,10 +758,24 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) get_qualified_rel_name(parts[i]), get_qualified_rel_name(parts[0])); - SPI_exec(query, 0); + SPIPlanPtr plan = SPI_prepare(query, 0, NULL); + + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + CppAsString(merge_range_partitions), + SPI_result); + + SPI_execute_snapshot(plan, NULL, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); + pfree(query); } + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); + SPI_finish(); /* Drop obsolete partitions */ From ba8e164a5aa46ae10641250ac097eaa839efa2e1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 13 Jun 2018 16:48:59 +0300 Subject: [PATCH 233/528] add small notice regarding builds on Windows --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index a96894d0..472f2d80 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,8 @@ CREATE EXTENSION pg_pathman; Done! Now it's time to setup your partitioning schemes. +> **Windows-specific**: pg_pathman imports several symbols (e.g. None_Receiver, InvalidObjectAddress) from PostgreSQL, which is fine by itself, but requires that those symbols are marked as `PGDLLIMPORT`. Unfortunately, some of them are not exported from vanilla PostgreSQL, which means that you have to either use Postgres Pro Standard/Enterprise (which includes all necessary patches), or patch and build your own distribution of PostgreSQL. + ## How to update In order to update pg_pathman: From c1bbebb4755c6c35751df68e8318cf8f7437f4a2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 13 Jun 2018 17:22:08 +0300 Subject: [PATCH 234/528] further improvements in README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 472f2d80..e9a5d958 100644 --- a/README.md +++ b/README.md @@ -104,8 +104,8 @@ In order to update pg_pathman: 3. Execute the following queries: ```plpgsql -/* replace X.Y with the version number, e.g. 1.3 */ -ALTER EXTENSION pg_pathman UPDATE TO "X.Y"; +/* only required for major releases, e.g. 
1.3 -> 1.4 */ +ALTER EXTENSION pg_pathman UPDATE; SET pg_pathman.enable = t; ``` From 5cdaa7a332db7884d92cfd6902cfd12cd2decc6e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 20 Jun 2018 14:38:37 +0300 Subject: [PATCH 235/528] fix several issues --- src/hooks.c | 1 + src/include/relation_info.h | 1 - src/pl_range_funcs.c | 2 +- src/relation_info.c | 37 ++++++++++++++++++------------------- src/utility_stmt_hooking.c | 10 +++++----- 5 files changed, 25 insertions(+), 26 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 83f040d8..0c11a666 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -167,6 +167,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (IS_OUTER_JOIN(extra->sjinfo->jointype)) { extract_actual_join_clauses(extra->restrictlist, + joinrel->relids, &joinclauses, &otherclauses); } else diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 14286546..fb8b98bf 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -334,7 +334,6 @@ void invalidate_pathman_status_info(Oid relid); void invalidate_pathman_status_info_cache(void); /* Dispatch cache */ -void refresh_pathman_relation_info(Oid relid); void close_pathman_relation_info(PartRelationInfo *prel); bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 242723f1..f69fd852 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -432,7 +432,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ - arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); if (getBaseType(arg_type) != getBaseType(prel->ev_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_typeof(dummy) should be %s", diff --git a/src/relation_info.c b/src/relation_info.c index b4f75f2a..1ac7c873 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -205,21 +205,23 @@ invalidate_psin_entry(PartStatusInfo *psin) psin->relid, MyProcPid); #endif - /* Mark entry as invalid */ - if (psin->prel && PrelReferenceCount(psin->prel) > 0) - { - PrelIsFresh(psin->prel) = false; - } - else + if (psin->prel) { - if (psin->prel) + if (PrelReferenceCount(psin->prel) > 0) + { + /* Mark entry as outdated and detach it */ + PrelIsFresh(psin->prel) = false; + } + else + { free_pathman_relation_info(psin->prel); - - (void) pathman_cache_search_relid(status_cache, - psin->relid, - HASH_REMOVE, - NULL); + } } + + (void) pathman_cache_search_relid(status_cache, + psin->relid, + HASH_REMOVE, + NULL); } @@ -227,13 +229,6 @@ invalidate_psin_entry(PartStatusInfo *psin) * Dispatch cache routines. 
 */
 
-/* Make changes to PartRelationInfo visible */
-void
-refresh_pathman_relation_info(Oid relid)
-{
-
-}
-
 /* Close PartRelationInfo entry */
 void
 close_pathman_relation_info(PartRelationInfo *prel)
@@ -242,6 +237,10 @@ close_pathman_relation_info(PartRelationInfo *prel)
 	Assert(PrelReferenceCount(prel) > 0);
 
 	PrelReferenceCount(prel) -= 1;
+
+	/* Remove entry if it's outdated and we're the last user */
+	if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel))
+		free_pathman_relation_info(prel);
 }
 
 /* Check if relation is partitioned by pg_pathman */
diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index 0bc5b43d..bf58311b 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -213,6 +213,7 @@ is_pathman_related_alter_column_type(Node *parsetree,
 	AlterTableStmt	   *alter_table_stmt = (AlterTableStmt *) parsetree;
 	ListCell		   *lc;
 	Oid					parent_relid;
+	bool				result = false;
 	PartRelationInfo   *prel;
 
 	Assert(IsPathmanReady());
@@ -235,8 +236,6 @@
 		/* Return 'parent_relid' and 'prel->parttype' */
 		if (parent_relid_out) *parent_relid_out = parent_relid;
 		if (part_type_out) *part_type_out = prel->parttype;
-
-		close_pathman_relation_info(prel);
 	}
 	else return false;
@@ -264,11 +263,12 @@
 		if (attr_number_out) *attr_number_out = attnum;
 
 		/* Success! */
-		return true;
+		result = true;
 	}
 
-	/* Default failure */
-	return false;
+	close_pathman_relation_info(prel);
+
+	return result;
 }
 

From 28749f5a7c1f99c17ddef1d873df8f1ab953047d Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov 
Date: Wed, 20 Jun 2018 14:46:48 +0300
Subject: [PATCH 236/528] fix a few tests

---
 expected/pathman_column_type.out | 60 ++++++++++++++++----------------
 expected/pathman_expressions.out |  4 +--
 2 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out
index 4382db1f..eacdb97a 100644
--- a/expected/pathman_column_type.out
+++ b/expected/pathman_column_type.out
@@ -20,12 +20,12 @@ SELECT * FROM test_column_type.test;
 (0 rows)
 
 SELECT context, entries FROM pathman_cache_stats ORDER BY context;
-         context          | entries
---------------------------+---------
- maintenance              |       0
- partition bounds cache   |      10
- partition dispatch cache |       1
- partition parents cache  |      10
+         context         | entries
+-------------------------+---------
+ maintenance             |       0
+ partition bounds cache  |      10
+ partition parents cache |      10
+ partition status cache  |       3
 (4 rows)
 
 /* change column's type (should flush caches) */
@@ -51,12 +51,12 @@ SELECT partrel, cooked_expr FROM pathman_config;
 (1 row)
 
 SELECT context, entries FROM pathman_cache_stats ORDER BY context;
-         context          | entries
---------------------------+---------
- maintenance              |       0
- partition bounds cache   |      10
- partition dispatch cache |       1
- partition parents cache  |      10
+         context         | entries
+-------------------------+---------
+ maintenance             |       0
+ partition bounds cache  |      10
+ partition parents cache |      10
+ partition status cache  |       3
 (4 rows)
 
 /* check insert dispatching */
@@ -102,12 +102,12 @@ SELECT * FROM test_column_type.test;
 (0 rows)
 
 SELECT context, entries FROM pathman_cache_stats ORDER BY context;
-         context          | entries
---------------------------+---------
- maintenance              |       0
- partition bounds cache   |       5
- partition dispatch cache |       1
- partition parents cache  |       5
+         context         | entries
+-------------------------+---------
+ maintenance             |       0
+ partition bounds cache  |       5
+ partition parents cache |       5
+ partition status 
cache | 3 (4 rows) /* change column's type (should NOT work) */ @@ -120,12 +120,12 @@ SELECT * FROM test_column_type.test; (0 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents cache | 5 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 (4 rows) /* change column's type (should flush caches) */ @@ -137,12 +137,12 @@ SELECT * FROM test_column_type.test; (0 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents cache | 5 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 (4 rows) /* check insert dispatching */ diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 9e19d217..685ca2d3 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -371,7 +371,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); @@ -382,7 +382,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; From 8ad3bf8561a22c4416d30af5fa07f21d7901d018 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 20 Jun 2018 18:27:24 +0300 Subject: [PATCH 237/528] track PartRelationInfo references using ResourceOwner --- src/init.c | 29 ++++---- src/relation_info.c | 164 +++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 172 insertions(+), 21 deletions(-) diff --git a/src/init.c b/src/init.c index 58479939..ed89bf0b 100644 --- a/src/init.c +++ b/src/init.c @@ -47,14 +47,15 @@ MemoryContext PathmanParentsCacheContext = NULL; MemoryContext PathmanStatusCacheContext = NULL; MemoryContext PathmanBoundsCacheContext = NULL; -/* Storage for PartRelationInfos */ -HTAB *parents_cache = NULL; /* Storage for PartParentInfos */ -HTAB *status_cache = NULL; +HTAB *parents_cache = NULL; + +/* Storage for PartStatusInfos */ +HTAB *status_cache = NULL; /* Storage for PartBoundInfos */ -HTAB *bounds_cache = NULL; +HTAB *bounds_cache = NULL; /* pg_pathman's init status */ PathmanInitState pathman_init_state; @@ -63,10 +64,6 @@ PathmanInitState pathman_init_state; bool pathman_hooks_enabled = true; -/* 
Shall we install new relcache callback? */ -static bool relcache_callback_needed = true; - - /* Functions for various local caches */ static bool init_pathman_relation_oids(void); static void fini_pathman_relation_oids(void); @@ -196,6 +193,8 @@ init_main_pathman_toggles(void) bool load_config(void) { + static bool relcache_callback_needed = true; + /* * Try to cache important relids. * @@ -321,7 +320,9 @@ init_local_cache(void) Assert(MemoryContextIsValid(PathmanBoundsCacheContext)); /* Clear children */ - MemoryContextResetChildren(TopPathmanContext); + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); } /* Initialize pg_pathman's memory contexts */ else @@ -356,7 +357,7 @@ init_local_cache(void) memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartRelationInfo); + ctl.entrysize = sizeof(PartParentInfo); ctl.hcxt = PathmanParentsCacheContext; parents_cache = hash_create(PATHMAN_PARENTS_CACHE, @@ -394,11 +395,13 @@ fini_local_cache(void) hash_destroy(bounds_cache); parents_cache = NULL; - status_cache = NULL; - bounds_cache = NULL; + status_cache = NULL; + bounds_cache = NULL; /* Now we can clear allocations */ - MemoryContextResetChildren(TopPathmanContext); + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); } diff --git a/src/relation_info.c b/src/relation_info.c index 1ac7c873..c9c46f95 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -35,6 +35,7 @@ #include "utils/hsearch.h" #include "utils/inval.h" #include "utils/memutils.h" +#include "utils/resowner.h" #include "utils/ruleutils.h" #include "utils/syscache.h" #include "utils/lsyscache.h" @@ -66,6 +67,12 @@ typedef struct cmp_func_info Oid collid; } cmp_func_info; +typedef struct prel_resowner_info +{ + ResourceOwner owner; + List *prels; +} prel_resowner_info; + /* * For pg_pathman.enable_bounds_cache GUC. */ @@ -77,6 +84,11 @@ bool pg_pathman_enable_bounds_cache = true; */ static bool delayed_shutdown = false; /* pathman was dropped */ +/* + * PartRelationInfo is controlled by ResourceOwner; + */ +static HTAB *prel_resowner = NULL; + /* Handy wrappers for Oids */ #define bsearch_oid(key, array, array_size) \ @@ -88,6 +100,13 @@ static void free_pathman_relation_info(PartRelationInfo *prel); static void invalidate_psin_entries_using_relid(Oid relid); static void invalidate_psin_entry(PartStatusInfo *psin); +static PartRelationInfo *resowner_prel_add(PartRelationInfo *prel); +static PartRelationInfo *resowner_prel_del(PartRelationInfo *prel); +static void resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg); + static Expr *get_partition_constraint_expr(Oid partition); static void fill_prel_with_partitions(PartRelationInfo *prel, @@ -233,10 +252,7 @@ invalidate_psin_entry(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { - /* Check that refcount is valid */ - Assert(PrelReferenceCount(prel) > 0); - - PrelReferenceCount(prel) -= 1; + (void) resowner_prel_del(prel); /* Remove entry is it's outdated and we're the last user */ if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) @@ -327,10 +343,7 @@ get_pathman_relation_info(Oid relid) (psin->prel ? 
"live" : "NULL"), relid, MyProcPid); #endif - if (psin->prel) - PrelReferenceCount(psin->prel) += 1; - - return psin->prel; + return resowner_prel_add(psin->prel); } /* Build a new PartRelationInfo for partitioned relation */ @@ -483,6 +496,141 @@ free_pathman_relation_info(PartRelationInfo *prel) MemoryContextDelete(prel->mcxt); } +static PartRelationInfo * +resowner_prel_add(PartRelationInfo *prel) +{ + if (!prel_resowner) + { + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(ResourceOwner); + ctl.entrysize = sizeof(prel_resowner_info); + ctl.hcxt = TopPathmanContext; + + prel_resowner = hash_create("prel resowner", + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + + RegisterResourceReleaseCallback(resonwner_prel_callback, NULL); + } + + if (prel) + { + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + bool found; + MemoryContext old_mcxt; + + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_ENTER, + &found); + + if (!found) + info->prels = NIL; + + /* Register this 'prel' */ + old_mcxt = MemoryContextSwitchTo(TopPathmanContext); + info->prels = list_append_unique(info->prels, prel); + MemoryContextSwitchTo(old_mcxt); + + /* Finally, increment refcount */ + PrelReferenceCount(prel) += 1; + } + + return prel; +} + +static PartRelationInfo * +resowner_prel_del(PartRelationInfo *prel) +{ + /* Must be active! */ + Assert(prel_resowner); + + if (prel) + { + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); + + if (info) + { + /* Check that 'prel' is registered! */ + Assert(list_member(info->prels, prel)); + + /* Remove it iff we're the only user */ + if (PrelReferenceCount(prel) == 1) + info->prels = list_delete(info->prels, prel); + } + + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + + /* Finally, decrement refcount */ + PrelReferenceCount(prel) -= 1; + } + + return prel; +} + +static void +resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg) +{ + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + + if (prel_resowner) + { + ListCell *lc; + + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); + + if (info) + { + foreach (lc, info->prels) + { + PartRelationInfo *prel = lfirst(lc); + + if (!isCommit) + { + /* Reset refcount for valid entry */ + if (PrelIsFresh(prel)) + { + PrelReferenceCount(prel) = 0; + } + /* Otherwise, free it when refcount is zero */ + else if (--PrelReferenceCount(prel) == 0) + { + free_pathman_relation_info(prel); + } + } + else + elog(ERROR, + "cache reference leak: PartRelationInfo(%d) has count %d", + PrelParentRelid(prel), PrelReferenceCount(prel)); + } + + list_free(info->prels); + + hash_search(prel_resowner, + (void *) &resowner, + HASH_REMOVE, + NULL); + } + } +} + /* Fill PartRelationInfo with partition-related info */ static void fill_prel_with_partitions(PartRelationInfo *prel, From 98fe5faee70302bee374050880535aae133d58fe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 20 Jun 2018 19:31:26 +0300 Subject: [PATCH 238/528] minor fixes in ResourceOwner-based tracking machinery --- src/relation_info.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index c9c46f95..33aa6125 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -532,7 +532,7 @@ 
resowner_prel_add(PartRelationInfo *prel) /* Register this 'prel' */ old_mcxt = MemoryContextSwitchTo(TopPathmanContext); - info->prels = list_append_unique(info->prels, prel); + info->prels = list_append_unique_ptr(info->prels, prel); MemoryContextSwitchTo(old_mcxt); /* Finally, increment refcount */ @@ -561,11 +561,11 @@ resowner_prel_del(PartRelationInfo *prel) if (info) { /* Check that 'prel' is registered! */ - Assert(list_member(info->prels, prel)); + Assert(list_member_ptr(info->prels, prel)); /* Remove it iff we're the only user */ if (PrelReferenceCount(prel) == 1) - info->prels = list_delete(info->prels, prel); + info->prels = list_delete_ptr(info->prels, prel); } /* Check that refcount is valid */ From d68c9a79acbe37dacef31632be54505072c47c1e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 13:19:17 +0300 Subject: [PATCH 239/528] small fix in merge_range_partitions() --- src/pl_range_funcs.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index f69fd852..a1b4c0fe 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -743,8 +743,11 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) } } - ObjectAddressSet(object, RelationRelationId, parts[i]); - add_exact_object_address(&object, objects); + if (i > 0) + { + ObjectAddressSet(object, RelationRelationId, parts[i]); + add_exact_object_address(&object, objects); + } } /* Check that partitions are adjacent */ From c6153f4b23d175b600c725e04560ac2aa820f600 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 13:22:46 +0300 Subject: [PATCH 240/528] add a basic leak tracker --- sql/pathman_subpartitions.sql | 3 +++ src/include/relation_info.h | 38 +++++++++++++++++++++++++++++++---- src/relation_info.c | 31 +++++++++++++++++++++++++--- 3 files changed, 65 insertions(+), 7 deletions(-) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 7f38f629..8f485503 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -3,6 +3,9 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; +:gdb +select pg_sleep(5); + /* Create two level partitioning structure */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index fb8b98bf..2ce6fa01 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -31,6 +31,12 @@ #include "utils/relcache.h" +#ifdef USE_ASSERT_CHECKING +#define USE_RELINFO_LOGGING +#define USE_RELINFO_LEAK_TRACKER +#endif + + /* Range bound */ typedef struct { @@ -215,6 +221,10 @@ typedef struct PartRelationInfo Oid cmp_proc, /* comparison fuction for 'ev_type' */ hash_proc; /* hash function for 'ev_type' */ +#ifdef USE_RELINFO_LEAK_TRACKER + List *owners; /* saved callers of get_pathman_relation_info() */ +#endif + MemoryContext mcxt; /* memory context holding this struct */ } PartRelationInfo; @@ -334,9 +344,9 @@ void invalidate_pathman_status_info(Oid relid); void invalidate_pathman_status_info_cache(void); /* Dispatch cache */ -void close_pathman_relation_info(PartRelationInfo *prel); bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); +void close_pathman_relation_info(PartRelationInfo *prel); void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, @@ -382,11 +392,31 @@ char *canonicalize_partitioning_expression(const Oid relid, void delay_pathman_shutdown(void); void finish_delayed_invalidation(void); +void 
init_relation_info_static_data(void); -/* For pg_pathman.enable_bounds_cache GUC */ -extern bool pg_pathman_enable_bounds_cache; -void init_relation_info_static_data(void); +/* For pg_pathman.enable_bounds_cache GUC */ +extern bool pg_pathman_enable_bounds_cache; + + +/* This allows us to track leakers of PartRelationInfo */ +#ifdef USE_RELINFO_LEAK_TRACKER +extern const char *prel_resowner_function; +extern int prel_resowner_line; + +#define get_pathman_relation_info(relid) \ + ( \ + prel_resowner_function = __FUNCTION__, \ + prel_resowner_line = __LINE__, \ + get_pathman_relation_info(relid) \ + ) + +#define close_pathman_relation_info(prel) \ + do { \ + close_pathman_relation_info(prel); \ + prel = NULL; \ + } while (0) +#endif #endif /* RELATION_INFO_H */ diff --git a/src/relation_info.c b/src/relation_info.c index 33aa6125..2d520547 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -55,8 +55,11 @@ #define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression \"%s\"" -#ifdef USE_ASSERT_CHECKING -#define USE_RELINFO_LOGGING +#ifdef USE_RELINFO_LEAK_TRACKER +#undef get_pathman_relation_info +#undef close_pathman_relation_info +const char *prel_resowner_function = NULL; +int prel_resowner_line = 0; #endif @@ -73,6 +76,7 @@ typedef struct prel_resowner_info List *prels; } prel_resowner_info; + /* * For pg_pathman.enable_bounds_cache GUC. */ @@ -373,7 +377,7 @@ build_pathman_relation_info(Oid relid, Datum *values) ALLOCSET_SMALL_SIZES); /* Create a new PartRelationInfo */ - prel = MemoryContextAlloc(prel_mcxt, sizeof(PartRelationInfo)); + prel = MemoryContextAllocZero(prel_mcxt, sizeof(PartRelationInfo)); prel->relid = relid; prel->refcount = 0; prel->fresh = true; @@ -535,6 +539,15 @@ resowner_prel_add(PartRelationInfo *prel) info->prels = list_append_unique_ptr(info->prels, prel); MemoryContextSwitchTo(old_mcxt); +#ifdef USE_RELINFO_LEAK_TRACKER + /* Save current caller (function:line) */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); + prel->owners = lappend(prel->owners, + list_make2(makeString((char *) prel_resowner_function), + makeInteger(prel_resowner_line))); + MemoryContextSwitchTo(old_mcxt); +#endif + /* Finally, increment refcount */ PrelReferenceCount(prel) += 1; } @@ -616,9 +629,21 @@ resonwner_prel_callback(ResourceReleasePhase phase, } } else + { +#ifdef USE_RELINFO_LEAK_TRACKER + ListCell *lc; + + foreach (lc, prel->owners) + { + char *fun = strVal(linitial(lfirst(lc))); + int line = intVal(lsecond(lfirst(lc))); + elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); + } +#endif elog(ERROR, "cache reference leak: PartRelationInfo(%d) has count %d", PrelParentRelid(prel), PrelReferenceCount(prel)); + } } list_free(info->prels); From 7193e4bede940607a1bcad4853577107c592e7da Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 13:33:04 +0300 Subject: [PATCH 241/528] simplified and fixed select_partition_for_insert() --- src/include/partition_filter.h | 20 ++--- src/partition_filter.c | 146 +++++++++++++-------------------- src/utility_stmt_hooking.c | 33 +------- 3 files changed, 70 insertions(+), 129 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index f50ec342..7c15a017 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -50,9 +50,7 @@ typedef struct /* Default settings for ResultPartsStorage */ -#define RPS_DEFAULT_ENTRY_SIZE sizeof(ResultPartsStorage) #define RPS_DEFAULT_SPECULATIVE false /* speculative inserts */ - #define RPS_CLOSE_RELATIONS true 
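PATCH 240 above shadows get_pathman_relation_info() and close_pathman_relation_info() with macros so that every caller's `__FUNCTION__`/`__LINE__` is recorded for leak reports. A stripped-down, self-contained illustration of the same idiom (all names here are hypothetical):

```c
#include <stdio.h>

static const char *last_acquire_func = NULL;
static int		   last_acquire_line = 0;

static int *
acquire_thing_impl(void)
{
	static int thing;

	return &thing;
}

/*
 * Record the call site, then call the real function. The comma
 * operator keeps the macro usable as an expression, so existing
 * call sites need no changes.
 */
#define acquire_thing() \
	( \
		last_acquire_func = __FUNCTION__, \
		last_acquire_line = __LINE__, \
		acquire_thing_impl() \
	)

int
main(void)
{
	int *thing = acquire_thing();

	printf("acquired %p from %s:%d\n",
		   (void *) thing, last_acquire_func, last_acquire_line);
	return 0;
}
```

Note that the C preprocessor never re-expands a macro inside its own replacement list, which is also what lets the patch safely redefine close_pathman_relation_info() in terms of itself; `__FUNCTION__` is the GCC spelling of C99's standard `__func__`, matching the patch's own usage.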
 #define RPS_SKIP_RELATIONS		false
 
@@ -75,7 +73,7 @@ typedef void (*rri_holder_cb)(ResultRelInfoHolder *rri_holder,
  */
 struct ResultPartsStorage
 {
-	ResultRelInfo  *base_rri;				/* original ResultRelInfo (parent) */
+	ResultRelInfo  *base_rri;				/* original ResultRelInfo */
 	EState		   *estate;					/* pointer to executor's state */
 
 	CmdType			command_type;			/* INSERT | UPDATE */
@@ -93,6 +91,10 @@ struct ResultPartsStorage
 	bool			close_relations;
 	LOCKMODE		head_open_lock_mode;
 	LOCKMODE		heap_close_lock_mode;
+
+	PartRelationInfo   *prel;
+	ExprState		   *prel_expr_state;
+	ExprContext		   *prel_econtext;
 };
 
 typedef struct
@@ -115,7 +117,6 @@ typedef struct
 	JunkFilter	   *junkfilter;				/* junkfilter for subplan_slot */
 
 	ExprContext	   *tup_convert_econtext;	/* ExprContext for projections */
-	ExprState	   *expr_state;				/* for partitioning expression */
 } PartitionFilterState;
 
@@ -152,10 +153,10 @@ void init_partition_filter_static_data(void);
 
 /* Initialize storage for some parent table */
 void init_result_parts_storage(ResultPartsStorage *parts_storage,
-							   ResultRelInfo *parent_rri,
+							   Oid parent_relid,
+							   ResultRelInfo *current_rri,
 							   EState *estate,
 							   CmdType cmd_type,
-							   Size table_entry_size,
 							   bool close_relations,
 							   bool speculative_inserts,
 							   rri_holder_cb init_rri_holder_cb,
@@ -178,11 +179,8 @@
 Oid * find_partitions_for_value(Datum value, Oid value_type,
 								const PartRelationInfo *prel,
 								int *nparts);
 
-ResultRelInfoHolder *select_partition_for_insert(ExprState *expr_state,
-												 ExprContext *econtext,
-												 EState *estate,
-												 const PartRelationInfo *prel,
-												 ResultPartsStorage *parts_storage);
+ResultRelInfoHolder *select_partition_for_insert(ResultPartsStorage *parts_storage,
+												 TupleTableSlot *slot);
 
 Plan * make_partition_filter(Plan *subplan,
 							 Oid parent_relid,
diff --git a/src/partition_filter.c b/src/partition_filter.c
index f70c9ef0..fa986c4e 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -143,10 +143,10 @@ init_partition_filter_static_data(void)
 /* Initialize ResultPartsStorage (hash table etc) */
 void
 init_result_parts_storage(ResultPartsStorage *parts_storage,
-						  ResultRelInfo *parent_rri,
+						  Oid parent_relid,
+						  ResultRelInfo *current_rri,
 						  EState *estate,
 						  CmdType cmd_type,
-						  Size table_entry_size,
 						  bool close_relations,
 						  bool speculative_inserts,
 						  rri_holder_cb init_rri_holder_cb,
@@ -158,13 +158,13 @@ init_result_parts_storage(ResultPartsStorage *parts_storage,
 
 	memset(result_rels_table_config, 0, sizeof(HASHCTL));
 	result_rels_table_config->keysize = sizeof(Oid);
-	result_rels_table_config->entrysize = table_entry_size;
+	result_rels_table_config->entrysize = sizeof(ResultRelInfoHolder);
 
 	parts_storage->result_rels_table = hash_create("ResultRelInfo storage",
 												   10, result_rels_table_config,
 												   HASH_ELEM | HASH_BLOBS);
 
-	Assert(parent_rri);
-	parts_storage->base_rri = parent_rri;
+	Assert(current_rri);
+	parts_storage->base_rri = current_rri;
 
 	Assert(estate);
 	parts_storage->estate = estate;
@@ -185,6 +185,19 @@ init_result_parts_storage(ResultPartsStorage *parts_storage,
 	parts_storage->close_relations = close_relations;
 	parts_storage->head_open_lock_mode = RowExclusiveLock;
 	parts_storage->heap_close_lock_mode = NoLock;
+
+	/* Fetch PartRelationInfo for this partitioned relation */
+	parts_storage->prel = get_pathman_relation_info(parent_relid);
+	shout_if_prel_is_invalid(parent_relid, parts_storage->prel, PT_ANY);
+
+	/* Build a partitioning expression state */
+	parts_storage->prel_expr_state = prepare_expr_state(parts_storage->prel,
+														parts_storage->base_rri->ri_RelationDesc,
+														parts_storage->estate,
+														cmd_type == CMD_UPDATE);
+
+	/* Build 
expression context */ + parts_storage->prel_econtext = CreateExprContext(parts_storage->estate); } /* Free ResultPartsStorage (close relations etc) */ @@ -222,6 +235,9 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) /* Finally destroy hash table */ hash_destroy(parts_storage->result_rels_table); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(parts_storage->prel); } /* Find a ResultRelInfo for the partition using ResultPartsStorage */ @@ -250,10 +266,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) List *translated_vars; MemoryContext old_mcxt; - /* Check that 'base_rri' is set */ - if (!parts_storage->base_rri) - elog(ERROR, "ResultPartsStorage contains no base_rri"); - /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) @@ -435,23 +447,26 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). */ ResultRelInfoHolder * -select_partition_for_insert(ExprState *expr_state, - ExprContext *econtext, - EState *estate, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage) +select_partition_for_insert(ResultPartsStorage *parts_storage, + TupleTableSlot *slot) { - ResultRelInfoHolder *rri_holder; + bool close_prel = false; + PartRelationInfo *prel = parts_storage->prel; + ExprState *expr_state = parts_storage->prel_expr_state; + ExprContext *expr_context = parts_storage->prel_econtext; Oid parent_relid = PrelParentRelid(prel), partition_relid = InvalidOid; Oid *parts; int nparts; bool isnull; Datum value; + ResultRelInfoHolder *result; + + parts_storage->prel_econtext->ecxt_scantuple = slot; /* Execute expression */ - value = ExecEvalExprCompat(expr_state, econtext, &isnull, - mult_result_handler); + value = ExecEvalExprCompat(expr_state, expr_context, + &isnull, mult_result_handler); if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); @@ -473,54 +488,37 @@ select_partition_for_insert(ExprState *expr_state, else partition_relid = parts[0]; /* Get ResultRelationInfo holder for the selected partition */ - rri_holder = scan_result_parts_storage(partition_relid, parts_storage); + result = scan_result_parts_storage(partition_relid, parts_storage); + + /* Should we close 'prel'? 
*/ + if (close_prel) + close_pathman_relation_info(prel); - /* This partition has been dropped, repeat with a new 'prel' */ - if (rri_holder == NULL) + if (result == NULL || nparts == 0) { - /* Get a fresh PartRelationInfo */ + /* This partition has been dropped | we have a new one */ prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - /* Paranoid check (all partitions have vanished) */ - if (!prel) - elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(parent_relid)); + /* Store new 'prel' in 'parts_storage' */ + close_pathman_relation_info(parts_storage->prel); + parts_storage->prel = prel; } - /* This partition might have sub-partitions */ - else if (rri_holder->has_children) + else if (result->has_children) { - const PartRelationInfo *child_prel; + /* This partition is a parent itself, repeat */ + prel = get_pathman_relation_info(partition_relid); + shout_if_prel_is_invalid(partition_relid, prel, PT_RANGE); + close_prel = true; - /* Fetch PartRelationInfo for this partitioned relation */ - child_prel = get_pathman_relation_info(rri_holder->partid); - - /* Might be a false alarm */ - if (!child_prel) - return rri_holder; - - /* Build an expression state if it's not ready yet */ - if (!rri_holder->expr_state) - { - /* Fetch original topmost parent */ - Relation source_rel = parts_storage->base_rri->ri_RelationDesc; - - /* Build a partitioning expression state */ - rri_holder->expr_state = prepare_expr_state(child_prel, - source_rel, - estate, - true); - } - - /* Recursively search for subpartitions */ - rri_holder = select_partition_for_insert(rri_holder->expr_state, - econtext, estate, - child_prel, parts_storage); + /* We're not done yet */ + result = NULL; } } /* Loop until we get some result */ - while (rri_holder == NULL); + while (result == NULL); - return rri_holder; + return result; } static ExprState * @@ -660,11 +658,9 @@ void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; + Oid parent_relid = state->partitioned_table; PlanState *child_state; ResultRelInfo *current_rri; - Relation current_rel; - PartRelationInfo *prel; - bool try_map; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); @@ -672,31 +668,11 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) /* Fetch current result relation (rri + rel) */ current_rri = estate->es_result_relation_info; - current_rel = current_rri->ri_RelationDesc; - - /* - * In UPDATE queries we have to work with child relation tlist, - * but expression contains varattnos of base relation, so we - * map parent varattnos to child varattnos. - * - * We don't need map if current relation == base relation. - */ - try_map = state->command_type == CMD_UPDATE && - RelationGetRelid(current_rel) != state->partitioned_table; - - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - - /* Build a partitioning expression state */ - state->expr_state = prepare_expr_state(prel, current_rel, estate, try_map); - - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); /* Init ResultRelInfo cache */ - init_result_parts_storage(&state->result_parts, current_rri, + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, estate, state->command_type, - RPS_DEFAULT_ENTRY_SIZE, RPS_SKIP_RELATIONS, state->on_conflict_action != ONCONFLICT_NONE, RPS_RRI_CB(prepare_rri_for_insert, state), @@ -736,16 +712,8 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Store slot for expression evaluation */ - econtext->ecxt_scantuple = slot; - - /* - * Search for a matching partition. - * WARNING: 'prel' might change after this call! - */ - rri_holder = select_partition_for_insert(state->expr_state, - econtext, estate, - prel, &state->result_parts); + /* Search for a matching partition */ + rri_holder = select_partition_for_insert(&state->result_parts, slot); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index bf58311b..eda203e0 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -482,7 +482,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ResultPartsStorage parts_storage; ResultRelInfo *parent_rri; - ExprState *expr_state = NULL; + Oid parent_relid = RelationGetRelid(parent_rel); MemoryContext query_mcxt = CurrentMemoryContext; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ @@ -505,9 +505,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, estate->es_range_table = range_table; /* Initialize ResultPartsStorage */ - init_result_parts_storage(&parts_storage, parent_rri, + init_result_parts_storage(&parts_storage, + parent_relid, parent_rri, estate, CMD_INSERT, - RPS_DEFAULT_ENTRY_SIZE, RPS_CLOSE_RELATIONS, RPS_DEFAULT_SPECULATIVE, RPS_RRI_CB(prepare_rri_for_copy, cstate), @@ -540,7 +540,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, Oid tuple_oid = InvalidOid; ExprContext *econtext = GetPerTupleExprContext(estate); - PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; ResultRelInfo *child_rri; @@ -565,34 +564,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ExecSetSlotDescriptor(slot, tupDesc); ExecStoreTuple(tuple, slot, InvalidBuffer, false); - /* Store slot for expression evaluation */ - econtext->ecxt_scantuple = slot; - - /* Fetch PartRelationInfo for parent relation */ - prel = get_pathman_relation_info(RelationGetRelid(parent_rel)); - - /* Initialize expression state */ - if (expr_state == NULL) - { - MemoryContext old_mcxt; - Node *expr; - - old_mcxt = MemoryContextSwitchTo(query_mcxt); - - expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); - expr_state = ExecInitExpr((Expr *) expr, NULL); - - MemoryContextSwitchTo(old_mcxt); - } - /* Search for a matching partition */ - rri_holder = select_partition_for_insert(expr_state, econtext, estate, - prel, &parts_storage); + rri_holder = select_partition_for_insert(&parts_storage, slot); child_rri = rri_holder->result_rel_info; - /* Don't forget to close 'prel'! 
 */
-		close_pathman_relation_info(prel);
-
 		/* Magic: replace parent's ResultRelInfo with ours */
 		estate->es_result_relation_info = child_rri;
 
From 96220424c87fdf97599065a00e71f76b03995838 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Thu, 21 Jun 2018 13:33:31 +0300
Subject: [PATCH 242/528] update Dockerfile template for testgres

---
 Dockerfile.tmpl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl
index 0504dd5a..5ceaeb99 100644
--- a/Dockerfile.tmpl
+++ b/Dockerfile.tmpl
@@ -13,7 +13,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \
 
 RUN if [ "${CHECK_CODE}" = "false" ] ; then \
 	echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \
-	apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\
+	apk --no-cache add curl python3 python3-dev gcc make musl-dev cmocka-dev linux-headers;\
 	pip3 install virtualenv;\
 	fi

From ce4a301d028d58b790e55d6ee40d82340b35f3b7 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Thu, 21 Jun 2018 14:01:53 +0300
Subject: [PATCH 243/528] remove debug code from subpartitions test

---
 sql/pathman_subpartitions.sql | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql
index 8f485503..7f38f629 100644
--- a/sql/pathman_subpartitions.sql
+++ b/sql/pathman_subpartitions.sql
@@ -3,9 +3,6 @@
 CREATE EXTENSION pg_pathman;
 CREATE SCHEMA subpartitions;
 
-:gdb
-select pg_sleep(5);
-
 /* Create two level partitioning structure */

From e1a791a933f29bc4fe086f6d4223070b28f8d39d Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Thu, 21 Jun 2018 15:15:33 +0300
Subject: [PATCH 244/528] restore previous behavior

---
 expected/pathman_basic.out |  2 +-
 src/pl_funcs.c             | 22 ++++++++++++----------
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out
index de3bf727..1bdbcef9 100644
--- a/expected/pathman_basic.out
+++ b/expected/pathman_basic.out
@@ -147,7 +147,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9
 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)"
-PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM
+PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM
 \set VERBOSITY terse
 ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL;
 SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2);
diff --git a/src/pl_funcs.c b/src/pl_funcs.c
index ef68c11e..c4b13017 100644
--- a/src/pl_funcs.c
+++ b/src/pl_funcs.c
@@ -108,10 +108,12 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS)
 	Oid			partition = PG_GETARG_OID(0),
 				parent = get_parent_of_partition(partition);
 
-	if (OidIsValid(parent))
-		PG_RETURN_OID(parent);
+	if (!OidIsValid(parent))
+		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+						errmsg("\"%s\" is not a partition",
+							   get_rel_name_or_relid(partition))));
 
-	PG_RETURN_NULL();
+	PG_RETURN_OID(parent);
 }
 
 /*
@@ -121,17 +123,17 @@
 Datum
 get_partition_key_type_pl(PG_FUNCTION_ARGS)
 {
 	Oid			relid = PG_GETARG_OID(0);
+	Oid			typid;
 	PartRelationInfo *prel;
 
-	if ((prel = get_pathman_relation_info(relid)) != NULL)
-	{
-		Oid result = prel->ev_type;
-		close_pathman_relation_info(prel);
+	prel = get_pathman_relation_info(relid);
+	shout_if_prel_is_invalid(relid, prel, PT_ANY);
 
-		PG_RETURN_OID(result);
-	}
+	
typid = prel->ev_type; + + close_pathman_relation_info(prel); - PG_RETURN_NULL(); + PG_RETURN_OID(typid); } /* From 107acab211514f1a92a6851fec59686777c8cfab Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 16:28:43 +0300 Subject: [PATCH 245/528] refactoring in merge_range_partitions() --- src/include/relation_info.h | 3 + src/pl_range_funcs.c | 186 ++++++++++++------------------------ src/relation_info.c | 30 ++++-- 3 files changed, 86 insertions(+), 133 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index a18edd4d..ee4e9a35 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -348,6 +348,9 @@ bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); void close_pathman_relation_info(PartRelationInfo *prel); +void qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel); + void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, const PartType expected_part_type); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index c4c14254..feb028a5 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -64,14 +64,6 @@ static ArrayType *construct_bounds_array(Bound *elems, bool elmbyval, char elmalign); -static void check_range_adjacence(Oid cmp_proc, - Oid collid, - List *ranges); - -static void merge_range_partitions_internal(Oid parent, - Oid *parts, - uint32 nparts); - static char *deparse_constraint(Oid relid, Node *expr); static void modify_range_constraint(Oid partition_relid, @@ -639,13 +631,22 @@ merge_range_partitions(PG_FUNCTION_ARGS) Oid parent = InvalidOid; ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); - Oid *partitions; + Oid *parts; + int nparts; + Datum *datums; bool *nulls; - int nparts; int16 typlen; bool typbyval; char typalign; + + PartRelationInfo *prel; + Bound min_bound, + max_bound; + RangeEntry *bounds; + ObjectAddresses *objects = new_object_addresses(); + Snapshot fresh_snapshot; + FmgrInfo finfo; int i; /* Validate array type */ @@ -657,35 +658,32 @@ merge_range_partitions(PG_FUNCTION_ARGS) typlen, typbyval, typalign, &datums, &nulls, &nparts); - /* Extract partition Oids from array */ - partitions = palloc(sizeof(Oid) * nparts); - for (i = 0; i < nparts; i++) - { - Oid partition_relid; - partition_relid = DatumGetObjectId(datums[i]); - - /* check that is not has subpartitions */ - if (has_subclass(partition_relid)) - ereport(ERROR, (errmsg("cannot merge partitions"), - errdetail("at least one of specified partitions has children"))); - - partitions[i] = partition_relid; - } - if (nparts < 2) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("there must be at least two partitions"))); - /* Check if all partitions are from the same parent */ + /* Allocate arrays */ + parts = palloc(nparts * sizeof(Oid)); + bounds = palloc(nparts * sizeof(RangeEntry)); + for (i = 0; i < nparts; i++) { - Oid cur_parent = get_parent_of_partition(partitions[i]); + Oid cur_parent; + + /* Extract partition Oids from array */ + parts[i] = DatumGetObjectId(datums[i]); + + /* Prevent modification of partitions */ + LockRelationOid(parts[i], AccessExclusiveLock); + + /* Check if all partitions are from the same parent */ + cur_parent = get_parent_of_partition(parts[i]); /* If we couldn't find a parent, it's not a partition */ if (!OidIsValid(cur_parent)) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("relation \"%s\" is not a partition", - get_rel_name_or_relid(partitions[i])))); + 
get_rel_name_or_relid(parts[i])))); /* 'parent' is not initialized */ if (parent == InvalidOid) @@ -697,84 +695,52 @@ merge_range_partitions(PG_FUNCTION_ARGS) errdetail("all relations must share the same parent"))); } - /* Now merge partitions */ - merge_range_partitions_internal(parent, partitions, nparts); - - PG_RETURN_VOID(); -} - -static void -merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) -{ - PartRelationInfo *prel; - List *rentry_list = NIL; - RangeEntry *ranges, - *first, - *last; - FmgrInfo cmp_proc; - ObjectAddresses *objects = new_object_addresses(); - Snapshot fresh_snapshot; - int i; + /* Lock parent till transaction's end */ + LockRelationOid(parent, ShareUpdateExclusiveLock); /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); - - /* Lock parent till transaction's end */ - LockRelationOid(parent, ShareUpdateExclusiveLock); - - /* Process partitions */ + /* Copy rentries from 'prel' */ for (i = 0; i < nparts; i++) { - ObjectAddress object; - int j; - - /* Prevent modification of partitions */ - LockRelationOid(parts[i], AccessExclusiveLock); - - /* Look for the specified partition */ - for (j = 0; j < PrelChildrenCount(prel); j++) - { - if (ranges[j].child_oid == parts[i]) - { - rentry_list = lappend(rentry_list, &ranges[j]); - break; - } - } + uint32 idx = PrelHasPartition(prel, parts[i]); + Assert(idx > 0); - if (i > 0) - { - ObjectAddressSet(object, RelationRelationId, parts[i]); - add_exact_object_address(&object, objects); - } + bounds[i] = PrelGetRangesArray(prel)[idx - 1]; } - /* Check that partitions are adjacent */ - check_range_adjacence(prel->cmp_proc, prel->ev_collid, rentry_list); + /* Sort rentries by increasing bound */ + qsort_range_entries(bounds, nparts, prel); - /* First determine the bounds of a new constraint */ - first = (RangeEntry *) linitial(rentry_list); - last = (RangeEntry *) llast(rentry_list); + fmgr_info(prel->cmp_proc, &finfo); - /* Swap ranges if 'last' < 'first' */ - fmgr_info(prel->cmp_proc, &cmp_proc); - if (cmp_bounds(&cmp_proc, prel->ev_collid, &last->min, &first->min) < 0) + /* Check that partitions are adjacent */ + for (i = 1; i < nparts; i++) { - RangeEntry *tmp = last; + Bound cur_min = bounds[i].min, + prev_max = bounds[i - 1].max; - last = first; - first = tmp; + if (cmp_bounds(&finfo, prel->ev_collid, &cur_min, &prev_max) != 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partitions \"%s\" and \"%s\" are not adjacent", + get_rel_name(bounds[i - 1].child_oid), + get_rel_name(bounds[i].child_oid)))); + } } + /* First determine the bounds of a new constraint */ + min_bound = bounds[0].min; + max_bound = bounds[nparts - 1].max; + /* Drop old constraint and create a new one */ modify_range_constraint(parts[0], prel->expr_cstr, prel->ev_type, - &first->min, - &last->max); + &min_bound, + &max_bound); /* Make constraint visible */ CommandCounterIncrement(); @@ -792,6 +758,8 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Migrate the data from all partition to the first one */ for (i = 1; i < nparts; i++) { + ObjectAddress object; + char *query = psprintf("WITH part_data AS ( " "DELETE FROM %s RETURNING " "*) " @@ -812,6 +780,10 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) false, true, 0); pfree(query); + + /* To be deleted */ + ObjectAddressSet(object, RelationRelationId, 
parts[i]); + add_exact_object_address(&object, objects); } /* Free snapshot */ @@ -823,8 +795,13 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) performMultipleDeletions(objects, DROP_CASCADE, 0); free_object_addresses(objects); + pfree(bounds); + pfree(parts); + /* Don't forget to close 'prel'! */ close_pathman_relation_info(prel); + + PG_RETURN_VOID(); } @@ -1278,40 +1255,3 @@ construct_bounds_array(Bound *elems, return arr; } - -/* - * Check that range entries are adjacent - */ -static void -check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) -{ - ListCell *lc; - RangeEntry *last = NULL; - FmgrInfo finfo; - - fmgr_info(cmp_proc, &finfo); - - foreach(lc, ranges) - { - RangeEntry *cur = (RangeEntry *) lfirst(lc); - - /* Skip first iteration */ - if (!last) - { - last = cur; - continue; - } - - /* Check that last and current partitions are adjacent */ - if ((cmp_bounds(&finfo, collid, &last->max, &cur->min) != 0) && - (cmp_bounds(&finfo, collid, &cur->max, &last->min) != 0)) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("partitions \"%s\" and \"%s\" are not adjacent", - get_rel_name(last->child_oid), - get_rel_name(cur->child_oid)))); - } - - last = cur; - } -} diff --git a/src/relation_info.c b/src/relation_info.c index 2d520547..e9b9245c 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -746,16 +746,9 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Finalize 'prel' for a RANGE-partitioned table */ if (prel->parttype == PT_RANGE) { - cmp_func_info cmp_info; - - /* Prepare function info */ - fmgr_info(prel->cmp_proc, &cmp_info.flinfo); - cmp_info.collid = prel->ev_collid; - - /* Sort partitions by RangeEntry->min asc */ - qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), - sizeof(RangeEntry), cmp_range_entries, - (void *) &cmp_info); + qsort_range_entries(PrelGetRangesArray(prel), + PrelChildrenCount(prel), + prel); /* Initialize 'prel->children' array */ for (i = 0; i < PrelChildrenCount(prel); i++) @@ -789,6 +782,23 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); } +void +qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel) +{ + cmp_func_info cmp_info; + + /* Prepare function info */ + fmgr_info(prel->cmp_proc, &cmp_info.flinfo); + cmp_info.collid = prel->ev_collid; + + /* Sort partitions by RangeEntry->min asc */ + qsort_arg(entries, nentries, + sizeof(RangeEntry), + cmp_range_entries, + (void *) &cmp_info); +} + /* * Common PartRelationInfo checks. Emit ERROR if anything is wrong. 
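  *
  * Typical call pattern, a sketch distilled from the callers in this patch
  * rather than a strict contract:
  *
  *		prel = get_pathman_relation_info(parent_relid);
  *		shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE);
  *		... work with 'prel' ...
  *		close_pathman_relation_info(prel);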
*/ From 561c971f0ec4c806dd98786ca4bd91199e3de078 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 17:41:19 +0300 Subject: [PATCH 246/528] remove dead code --- src/include/init.h | 2 -- src/init.c | 50 ---------------------------------------------- 2 files changed, 52 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 799e1c2d..99426810 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -226,8 +226,6 @@ bool read_pathman_params(Oid relid, Datum *values, bool *isnull); -Oid *read_parent_oids(int *nelems); - bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, diff --git a/src/init.c b/src/init.c index ad521ced..db71704d 100644 --- a/src/init.c +++ b/src/init.c @@ -71,7 +71,6 @@ static void init_local_cache(void); static void fini_local_cache(void); /* Special handlers for read_pathman_config() */ -static void add_partrel_to_array(Datum *values, bool *isnull, void *context); static void startup_invalidate_parent(Datum *values, bool *isnull, void *context); static void read_pathman_config(void (*per_row_cb)(Datum *values, @@ -796,55 +795,6 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } -typedef struct -{ - Oid *array; - int nelems; - int capacity; -} read_parent_oids_cxt; - -/* - * Get a sorted array of partitioned tables' Oids. - */ -Oid * -read_parent_oids(int *nelems) -{ - read_parent_oids_cxt context = { NULL, 0, 0 }; - - read_pathman_config(add_partrel_to_array, &context); - - /* Perform sorting */ - qsort(context.array, context.nelems, sizeof(Oid), oid_cmp); - - /* Return values */ - *nelems = context.nelems; - return context.array; -} - - -/* read_pathman_config(): add parent to array of Oids */ -static void -add_partrel_to_array(Datum *values, bool *isnull, void *context) -{ - Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - read_parent_oids_cxt *result = (read_parent_oids_cxt *) context; - - if (result->array == NULL) - { - result->capacity = PART_RELS_SIZE; - result->array = palloc(result->capacity * sizeof(Oid)); - } - - if (result->nelems >= result->capacity) - { - result->capacity = result->capacity * 2 + 1; - result->array = repalloc(result->array, result->capacity * sizeof(Oid)); - } - - /* Append current relid */ - result->array[result->nelems++] = relid; -} - /* read_pathman_config(): create dummy cache entry for parent */ static void startup_invalidate_parent(Datum *values, bool *isnull, void *context) From 79e11d94a147095f6e131e980033018c449f8e2e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 17:43:39 +0300 Subject: [PATCH 247/528] protect data read from pg_pathman's tables --- src/init.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/init.c b/src/init.c index d8fb4c57..569a4c2f 100644 --- a/src/init.c +++ b/src/init.c @@ -664,6 +664,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Extract data if necessary */ if (values && isnull) { + htup = heap_copytuple(htup); heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); /* Perform checks for non-NULL columns */ @@ -778,6 +779,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) if ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { /* Extract data if necessary */ + htup = heap_copytuple(htup); heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); row_found = true; From 47b8ee25a5825ef9f0251f4888a060b29e766ef4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 17:46:02 +0300 Subject: [PATCH 
248/528] get rid of read_pathman_config() --- src/init.c | 80 ------------------------------------------------------ 1 file changed, 80 deletions(-) diff --git a/src/init.c b/src/init.c index 48e9875a..2c93f974 100644 --- a/src/init.c +++ b/src/init.c @@ -70,14 +70,6 @@ static void fini_pathman_relation_oids(void); static void init_local_cache(void); static void fini_local_cache(void); -/* Special handlers for read_pathman_config() */ -static void startup_invalidate_parent(Datum *values, bool *isnull, void *context); - -static void read_pathman_config(void (*per_row_cb)(Datum *values, - bool *isnull, - void *context), - void *context); - static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, const TypeCacheEntry *tce, @@ -213,9 +205,6 @@ load_config(void) /* Create various hash tables (caches) */ init_local_cache(); - /* Read PATHMAN_CONFIG table & fill cache */ - read_pathman_config(startup_invalidate_parent, NULL); - /* Register pathman_relcache_hook(), currently we can't unregister it */ if (relcache_callback_needed) { @@ -797,75 +786,6 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } -/* read_pathman_config(): create dummy cache entry for parent */ -static void -startup_invalidate_parent(Datum *values, bool *isnull, void *context) -{ - Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - - /* Check that relation 'relid' exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("table \"%s\" contains nonexistent relation %u", - PATHMAN_CONFIG, relid), - errhint(INIT_ERROR_HINT))); - } -} - -/* - * Go through the PATHMAN_CONFIG table and create PartRelationInfo entries. - */ -static void -read_pathman_config(void (*per_row_cb)(Datum *values, - bool *isnull, - void *context), - void *context) -{ - Relation rel; - HeapScanDesc scan; - Snapshot snapshot; - HeapTuple htup; - - /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(false), AccessShareLock); - - /* Check that 'partrel' column is if regclass type */ - Assert(TupleDescAttr(RelationGetDescr(rel), - Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); - - /* Check that number of columns == Natts_pathman_config */ - Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); - - snapshot = RegisterSnapshot(GetLatestSnapshot()); - scan = heap_beginscan(rel, snapshot, 0, NULL); - - /* Examine each row and create a PartRelationInfo in local cache */ - while((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) - { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Extract Datums from tuple 'htup' */ - heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); - - /* These attributes are marked as NOT NULL, check anyway */ - Assert(!isnull[Anum_pathman_config_partrel - 1]); - Assert(!isnull[Anum_pathman_config_parttype - 1]); - Assert(!isnull[Anum_pathman_config_expr - 1]); - - /* Execute per row callback */ - per_row_cb(values, isnull, context); - } - - /* Clean resources */ - heap_endscan(scan); - UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); -} - - /* * Validates range constraint. 
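 * (As an illustration: for a table partitioned by 'dt' with monthly ranges,
 * format 1 below would deparse to something like
 *		CHECK (dt >= '2015-01-01' AND dt < '2015-02-01'),
 * though the exact emitted text may differ.)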
It MUST have one of the following formats: * 1) EXPRESSION >= CONST AND EXPRESSION < CONST From 758694d10a835d466c8e76b5a05da0e599bf8715 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Jun 2018 14:32:50 +0300 Subject: [PATCH 249/528] fix tuple visibility in add_to_pathman_config() --- expected/pathman_calamity.out | 108 +++++++++++++++++----------------- src/pl_funcs.c | 4 ++ 2 files changed, 58 insertions(+), 54 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 6243f1d9..14ff9cd6 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -807,23 +807,23 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 1 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) /* Change this setting for code coverage */ @@ -853,23 +853,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; (11 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 + partition status cache | 3 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) /* Restore this GUC */ @@ -899,23 +899,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; (11 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - 
partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) /* check that parents cache has been flushed after partition was dropped */ @@ -943,12 +943,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; (11 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 (4 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); @@ -958,23 +958,23 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 9 - partition dispatch cache | 1 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 + partition status cache | 2 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) DROP SCHEMA calamity CASCADE; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c4b13017..fb457df1 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -19,6 +19,7 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" @@ -785,6 +786,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) heap_close(pathman_config, RowExclusiveLock); + /* Make changes visible */ + CommandCounterIncrement(); + /* Update caches only if this relation has children */ if (FCS_FOUND == find_inheritance_children_array(relid, NoLock, true, &children_count, From 26e2cc3e42c8100e82473a86a893bc30d8ddbdd6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Jun 2018 18:26:03 +0300 Subject: [PATCH 250/528] various fixes for multilevel partitioning --- src/hooks.c | 26 +++++++++++++++++--------- src/pg_pathman.c | 9 ++------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index e718edbe..b170e2cb 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -108,20 +108,24 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (inner_rte->inh) return; + /* We shouldn't process functions etc */ + if (inner_rte->rtekind != RTE_RELATION) + return; + /* We don't support these join types (since inner will be parameterized) */ if (jointype == JOIN_FULL || jointype == JOIN_RIGHT || jointype == JOIN_UNIQUE_INNER) return; + /* Skip if inner table is not allowed to act as parent (e.g. 
FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) + return; + /* Proceed iff relation 'innerrel' is partitioned */ if ((inner_prel = get_pathman_relation_info(inner_rte->relid)) == NULL) return; - /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) - goto cleanup; - /* * Check if query is: * 1) UPDATE part_table SET = .. FROM part_table. @@ -294,7 +298,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, add_path(joinrel, (Path *) nest_path); } -cleanup: /* Don't forget to close 'inner_prel'! */ close_pathman_relation_info(inner_prel); } @@ -380,14 +383,22 @@ pathman_rel_pathlist_hook(PlannerInfo *root, foreach (lc, root->append_rel_list) { AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + Oid child_oid, + parent_oid; + + /* Is it actually the same table? */ + child_oid = root->simple_rte_array[appinfo->child_relid]->relid; + parent_oid = root->simple_rte_array[appinfo->parent_relid]->relid; /* * If there's an 'appinfo', it means that somebody * (PG?) has already processed this partitioned table * and added its children to the plan. */ - if (appinfo->child_relid == rti) + if (appinfo->child_relid == rti && child_oid == parent_oid) + { goto cleanup; + } } } @@ -419,9 +430,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pathkeyDesc = (PathKey *) linitial(pathkeys); } - /* Mark as partitioned table */ - assign_rel_parenthood_status(rte, PARENTHOOD_DISALLOWED); - children = PrelGetChildrenArray(prel); ranges = list_make1_irange_full(prel, IR_COMPLETE); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index e236735c..87d361fe 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1951,13 +1951,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, set_rel_consider_parallel_compat(root, child_rel, child_rte); #endif - /* - * If inh is True and pathlist is not null then it is a partitioned - * table and we've already filled it, skip it. Otherwise build a - * pathlist for it - */ - if (PARENTHOOD_DISALLOWED != get_rel_parenthood_status(child_rte) || - child_rel->pathlist == NIL) + /* Build a few paths for this relation */ + if (child_rel->pathlist == NIL) { /* Compute child's access paths & sizes */ if (child_rte->relkind == RELKIND_FOREIGN_TABLE) From cf28b4ef0b952dd9ef7fd02ec63892a240625af2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Jun 2018 18:39:11 +0300 Subject: [PATCH 251/528] revisit some TODOs --- src/hooks.c | 4 ---- src/init.c | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index b170e2cb..89466d4b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -484,10 +484,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - /* - * WARNING: 'prel' might become invalid after append_child_relation(). 
- */ - /* Add parent if asked to */ if (prel->enable_parent) append_child_relation(root, parent_rel, parent_rowmark, diff --git a/src/init.c b/src/init.c index 2c93f974..d24b8ee6 100644 --- a/src/init.c +++ b/src/init.c @@ -665,7 +665,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Set ItemPointer if necessary */ if (iptr) - *iptr = htup->t_self; + *iptr = htup->t_self; /* FIXME: callers should lock table beforehand */ } /* Clean resources */ From 14d15f27ae4127ed031ed0473a58f66d57d1d629 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 13:39:09 +0300 Subject: [PATCH 252/528] fix select_partition_for_insert() --- src/partition_filter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 9e97698f..96ca00e8 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -498,7 +498,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { /* This partition has been dropped | we have a new one */ prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); /* Store new 'prel' in 'parts_storage' */ close_pathman_relation_info(parts_storage->prel); @@ -508,7 +508,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { /* This partition is a parent itself, repeat */ prel = get_pathman_relation_info(partition_relid); - shout_if_prel_is_invalid(partition_relid, prel, PT_RANGE); + shout_if_prel_is_invalid(partition_relid, prel, PT_ANY); close_prel = true; /* We're not done yet */ From add67fa8a08e398dd1e91234873386347dcb6fe0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 14:14:42 +0300 Subject: [PATCH 253/528] make use of RegisterCustomScanMethods(), rename some runtime* funcs and files --- Makefile | 2 +- src/hooks.c | 12 ++--- .../{runtimeappend.h => runtime_append.h} | 39 +++++++------- src/include/runtime_merge_append.h | 39 +++++++------- src/nodes_common.c | 2 +- src/partition_filter.c | 6 +-- src/partition_router.c | 4 +- src/pg_pathman.c | 4 +- src/{runtimeappend.c => runtime_append.c} | 54 ++++++++++--------- src/runtime_merge_append.c | 50 ++++++++--------- 10 files changed, 112 insertions(+), 100 deletions(-) rename src/include/{runtimeappend.h => runtime_append.h} (68%) rename src/{runtimeappend.c => runtime_append.c} (58%) diff --git a/Makefile b/Makefile index 1291e252..948bf3b6 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ MODULE_big = pg_pathman OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ - src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ + src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ diff --git a/src/hooks.c b/src/hooks.c index 89466d4b..2e547af1 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -19,7 +19,7 @@ #include "partition_router.h" #include "pathman_workers.h" #include "planner_tree_modification.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "runtime_merge_append.h" #include "utility_stmt_hooking.h" #include "utils.h" @@ -244,7 +244,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, continue; /* Try building RuntimeAppend path, skip if it's not 
possible */ - inner = create_runtimeappend_path(root, cur_inner_path, ppi, paramsel); + inner = create_runtime_append_path(root, cur_inner_path, ppi, paramsel); if (!inner) continue; @@ -549,8 +549,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ppi = get_appendrel_parampathinfo(rel, inner_required); if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) - inner_path = create_runtimeappend_path(root, cur_path, - ppi, paramsel); + inner_path = create_runtime_append_path(root, cur_path, + ppi, paramsel); else if (IsA(cur_path, MergeAppendPath) && pg_pathman_enable_runtime_merge_append) { @@ -560,8 +560,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, elog(FATAL, "Struct layouts of AppendPath and " "MergeAppendPath differ"); - inner_path = create_runtimemergeappend_path(root, cur_path, - ppi, paramsel); + inner_path = create_runtime_merge_append_path(root, cur_path, + ppi, paramsel); } if (inner_path) diff --git a/src/include/runtimeappend.h b/src/include/runtime_append.h similarity index 68% rename from src/include/runtimeappend.h rename to src/include/runtime_append.h index ee25c337..8e003a92 100644 --- a/src/include/runtimeappend.h +++ b/src/include/runtime_append.h @@ -21,6 +21,9 @@ #include "commands/explain.h" +#define RUNTIME_APPEND_NODE_NAME "RuntimeAppend" + + typedef struct { CustomPath cpath; @@ -70,32 +73,32 @@ extern CustomScanMethods runtimeappend_plan_methods; extern CustomExecMethods runtimeappend_exec_methods; -void init_runtimeappend_static_data(void); +void init_runtime_append_static_data(void); -Path * create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Path * create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); -Plan * create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Plan * create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Node * runtimeappend_create_scan_state(CustomScan *node); +Node * runtime_append_create_scan_state(CustomScan *node); -void runtimeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +void runtime_append_begin(CustomScanState *node, + EState *estate, + int eflags); -TupleTableSlot * runtimeappend_exec(CustomScanState *node); +TupleTableSlot * runtime_append_exec(CustomScanState *node); -void runtimeappend_end(CustomScanState *node); +void runtime_append_end(CustomScanState *node); -void runtimeappend_rescan(CustomScanState *node); +void runtime_append_rescan(CustomScanState *node); -void runtimeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); +void runtime_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); #endif /* RUNTIME_APPEND_H */ diff --git a/src/include/runtime_merge_append.h b/src/include/runtime_merge_append.h index 9aa6aed9..8d24bf20 100644 --- a/src/include/runtime_merge_append.h +++ b/src/include/runtime_merge_append.h @@ -14,12 +14,15 @@ #define RUNTIME_MERGE_APPEND_H -#include "runtimeappend.h" +#include "runtime_append.h" #include "pathman.h" #include "postgres.h" +#define RUNTIME_MERGE_APPEND_NODE_NAME "RuntimeMergeAppend" + + typedef struct { RuntimeAppendPath rpath; @@ -54,30 +57,30 @@ extern CustomExecMethods runtime_merge_append_exec_methods; void init_runtime_merge_append_static_data(void); -Path * create_runtimemergeappend_path(PlannerInfo 
*root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Path * create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); -Plan * create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Plan * create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Node * runtimemergeappend_create_scan_state(CustomScan *node); +Node * runtime_merge_append_create_scan_state(CustomScan *node); -void runtimemergeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +void runtime_merge_append_begin(CustomScanState *node, + EState *estate, + int eflags); -TupleTableSlot * runtimemergeappend_exec(CustomScanState *node); +TupleTableSlot * runtime_merge_append_exec(CustomScanState *node); -void runtimemergeappend_end(CustomScanState *node); +void runtime_merge_append_end(CustomScanState *node); -void runtimemergeappend_rescan(CustomScanState *node); +void runtime_merge_append_rescan(CustomScanState *node); -void runtimemergeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); +void runtime_merge_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); #endif /* RUNTIME_MERGE_APPEND_H */ diff --git a/src/nodes_common.c b/src/nodes_common.c index 66f2df12..a370c165 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -11,7 +11,7 @@ #include "init.h" #include "nodes_common.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "utils.h" #include "nodes/nodeFuncs.h" diff --git a/src/partition_filter.c b/src/partition_filter.c index 96ca00e8..3314cef5 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -131,6 +131,8 @@ init_partition_filter_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&partition_filter_plan_methods); } @@ -659,12 +661,10 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; Oid parent_relid = state->partitioned_table; - PlanState *child_state; ResultRelInfo *current_rri; /* It's convenient to store PlanState in 'custom_ps' */ - child_state = ExecInitNode(state->subplan, estate, eflags); - node->custom_ps = list_make1(child_state); + node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); /* Fetch current result relation (rri + rel) */ current_rri = estate->es_result_relation_info; diff --git a/src/partition_router.c b/src/partition_router.c index f4a8cb6c..30ebd5d2 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -58,6 +58,8 @@ init_partition_router_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&partition_router_plan_methods); } Plan * @@ -121,7 +123,7 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) { PartitionRouterState *state = (PartitionRouterState *) node; - /* Initialize PartitionFilter child node */ + /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 87d361fe..bc88217d 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -18,7 +18,7 @@ #include "partition_filter.h" #include "partition_router.h" #include "planner_tree_modification.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include 
"runtime_merge_append.h" #include "postgres.h" @@ -319,7 +319,7 @@ _PG_init(void) /* Initialize static data for all subsystems */ init_main_pathman_toggles(); init_relation_info_static_data(); - init_runtimeappend_static_data(); + init_runtime_append_static_data(); init_runtime_merge_append_static_data(); init_partition_filter_static_data(); init_partition_router_static_data(); diff --git a/src/runtimeappend.c b/src/runtime_append.c similarity index 58% rename from src/runtimeappend.c rename to src/runtime_append.c index 9e93aedf..e73c5c7b 100644 --- a/src/runtimeappend.c +++ b/src/runtime_append.c @@ -8,7 +8,7 @@ * ------------------------------------------------------------------------ */ -#include "runtimeappend.h" +#include "runtime_append.h" #include "utils/guc.h" @@ -21,25 +21,25 @@ CustomExecMethods runtimeappend_exec_methods; void -init_runtimeappend_static_data(void) +init_runtime_append_static_data(void) { - runtimeappend_path_methods.CustomName = "RuntimeAppend"; - runtimeappend_path_methods.PlanCustomPath = create_runtimeappend_plan; + runtimeappend_path_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_path_methods.PlanCustomPath = create_runtime_append_plan; - runtimeappend_plan_methods.CustomName = "RuntimeAppend"; - runtimeappend_plan_methods.CreateCustomScanState = runtimeappend_create_scan_state; + runtimeappend_plan_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_plan_methods.CreateCustomScanState = runtime_append_create_scan_state; - runtimeappend_exec_methods.CustomName = "RuntimeAppend"; - runtimeappend_exec_methods.BeginCustomScan = runtimeappend_begin; - runtimeappend_exec_methods.ExecCustomScan = runtimeappend_exec; - runtimeappend_exec_methods.EndCustomScan = runtimeappend_end; - runtimeappend_exec_methods.ReScanCustomScan = runtimeappend_rescan; + runtimeappend_exec_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_exec_methods.BeginCustomScan = runtime_append_begin; + runtimeappend_exec_methods.ExecCustomScan = runtime_append_exec; + runtimeappend_exec_methods.EndCustomScan = runtime_append_end; + runtimeappend_exec_methods.ReScanCustomScan = runtime_append_rescan; runtimeappend_exec_methods.MarkPosCustomScan = NULL; runtimeappend_exec_methods.RestrPosCustomScan = NULL; - runtimeappend_exec_methods.ExplainCustomScan = runtimeappend_explain; + runtimeappend_exec_methods.ExplainCustomScan = runtime_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimeappend", - "Enables the planner's use of RuntimeAppend custom node.", + "Enables the planner's use of " RUNTIME_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtimeappend, true, @@ -48,13 +48,15 @@ init_runtimeappend_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtimeappend_plan_methods); } Path * -create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { return create_append_path_common(root, inner_append, param_info, @@ -64,9 +66,9 @@ create_runtimeappend_path(PlannerInfo *root, } Plan * -create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { return create_append_plan_common(root, rel, best_path, tlist, @@ -75,7 +77,7 @@ 
create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimeappend_create_scan_state(CustomScan *node) +runtime_append_create_scan_state(CustomScan *node) { return create_append_scan_state_common(node, &runtimeappend_exec_methods, @@ -83,7 +85,7 @@ runtimeappend_create_scan_state(CustomScan *node) } void -runtimeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -116,25 +118,25 @@ fetch_next_tuple(CustomScanState *node) } TupleTableSlot * -runtimeappend_exec(CustomScanState *node) +runtime_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimeappend_end(CustomScanState *node) +runtime_append_end(CustomScanState *node) { end_append_common(node); } void -runtimeappend_rescan(CustomScanState *node) +runtime_append_rescan(CustomScanState *node) { rescan_append_common(node); } void -runtimeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_append_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 453ebab1..836a1fdd 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -191,23 +191,23 @@ unpack_runtimemergeappend_private(RuntimeMergeAppendState *scan_state, void init_runtime_merge_append_static_data(void) { - runtime_merge_append_path_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_path_methods.PlanCustomPath = create_runtimemergeappend_plan; + runtime_merge_append_path_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_path_methods.PlanCustomPath = create_runtime_merge_append_plan; - runtime_merge_append_plan_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_plan_methods.CreateCustomScanState = runtimemergeappend_create_scan_state; + runtime_merge_append_plan_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_plan_methods.CreateCustomScanState = runtime_merge_append_create_scan_state; - runtime_merge_append_exec_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_exec_methods.BeginCustomScan = runtimemergeappend_begin; - runtime_merge_append_exec_methods.ExecCustomScan = runtimemergeappend_exec; - runtime_merge_append_exec_methods.EndCustomScan = runtimemergeappend_end; - runtime_merge_append_exec_methods.ReScanCustomScan = runtimemergeappend_rescan; + runtime_merge_append_exec_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_exec_methods.BeginCustomScan = runtime_merge_append_begin; + runtime_merge_append_exec_methods.ExecCustomScan = runtime_merge_append_exec; + runtime_merge_append_exec_methods.EndCustomScan = runtime_merge_append_end; + runtime_merge_append_exec_methods.ReScanCustomScan = runtime_merge_append_rescan; runtime_merge_append_exec_methods.MarkPosCustomScan = NULL; runtime_merge_append_exec_methods.RestrPosCustomScan = NULL; - runtime_merge_append_exec_methods.ExplainCustomScan = runtimemergeappend_explain; + runtime_merge_append_exec_methods.ExplainCustomScan = runtime_merge_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimemergeappend", - "Enables the planner's use of RuntimeMergeAppend custom node.", + "Enables the planner's use of " RUNTIME_MERGE_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtime_merge_append, true, 
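/*
 * Note: the RegisterCustomScanMethods() calls added in this patch are not
 * mere cleanup. Since PostgreSQL 9.6, CustomScan plan methods are resolved
 * by CustomName whenever a serialized plan is read back (for instance in a
 * parallel worker), so each methods struct must be registered once at load
 * time. A minimal sketch of the pattern, using hypothetical names that are
 * not part of pg_pathman:
 */
#include "postgres.h"
#include "nodes/extensible.h"

static CustomScanMethods demo_plan_methods;
static CustomExecMethods demo_exec_methods;		/* exec callbacks omitted */

static Node *
demo_create_scan_state(CustomScan *cscan)
{
	/* A real node would allocate its own larger state struct here */
	CustomScanState *css = makeNode(CustomScanState);

	css->methods = &demo_exec_methods;
	return (Node *) css;
}

void
init_demo_node_static_data(void)
{
	demo_plan_methods.CustomName = "DemoScan";
	demo_plan_methods.CreateCustomScanState = demo_create_scan_state;
	demo_exec_methods.CustomName = "DemoScan";

	/* Lets stringToNode() resolve "DemoScan" back to these methods */
	RegisterCustomScanMethods(&demo_plan_methods);
}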
@@ -216,13 +216,15 @@ init_runtime_merge_append_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtime_merge_append_plan_methods); } Path * -create_runtimemergeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { RelOptInfo *rel = inner_append->path.parent; Path *path; @@ -245,9 +247,9 @@ create_runtimemergeappend_path(PlannerInfo *root, } Plan * -create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { CustomScan *node; Plan *plan; @@ -337,7 +339,7 @@ create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimemergeappend_create_scan_state(CustomScan *node) +runtime_merge_append_create_scan_state(CustomScan *node) { Node *state; state = create_append_scan_state_common(node, @@ -350,7 +352,7 @@ runtimemergeappend_create_scan_state(CustomScan *node) } void -runtimemergeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_merge_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -412,13 +414,13 @@ fetch_next_tuple(CustomScanState *node) } TupleTableSlot * -runtimemergeappend_exec(CustomScanState *node) +runtime_merge_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimemergeappend_end(CustomScanState *node) +runtime_merge_append_end(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; @@ -429,7 +431,7 @@ runtimemergeappend_end(CustomScanState *node) } void -runtimemergeappend_rescan(CustomScanState *node) +runtime_merge_append_rescan(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; int nplans; @@ -475,7 +477,7 @@ runtimemergeappend_rescan(CustomScanState *node) } void -runtimemergeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_merge_append_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; From 313b31a5103c3e9d873979a79fc42142aecabb9a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 14:25:07 +0300 Subject: [PATCH 254/528] remove useless junkfilters --- src/hooks.c | 8 ++++---- src/include/partition_filter.h | 8 ++------ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2e547af1..2f9e1cd7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -967,8 +967,8 @@ pathman_executor_hook(QueryDesc *queryDesc, if (IsA(state, ModifyTableState)) { - ModifyTableState *mt_state = (ModifyTableState *) state; - int i; + ModifyTableState *mt_state = (ModifyTableState *) state; + int i; for (i = 0; i < mt_state->mt_nplans; i++) { @@ -980,8 +980,8 @@ pathman_executor_hook(QueryDesc *queryDesc, ResultRelInfo *rri = &mt_state->resultRelInfo[i]; /* - * We unset junkfilter to disable junk - * cleaning in ExecModifyTable. + * HACK: We unset junkfilter to disable + * junk cleaning in ExecModifyTable. 
*/ rri->ri_junkFilter = NULL; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 7c15a017..b601a654 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -43,7 +43,6 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ - JunkFilter *junkfilter; /* junkfilter for cached ResultRelInfo */ bool has_children; /* hint that it might have children */ ExprState *expr_state; /* children have their own expressions */ } ResultRelInfoHolder; @@ -107,15 +106,12 @@ typedef struct Plan *subplan; /* proxy variable to store subplan */ ResultPartsStorage result_parts; /* partition ResultRelInfo cache */ + CmdType command_type; bool warning_triggered; /* warning message counter */ - TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - CmdType command_type; - TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ - JunkFilter *junkfilter; /* junkfilter for subplan_slot */ - + TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; From bb3b2b8501ca8ea5262868762b927b0474751e4f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 15:08:31 +0300 Subject: [PATCH 255/528] reuse PartitionFilter's ExprContext for ProjectionInfo --- src/include/partition_filter.h | 1 - src/partition_filter.c | 9 +-------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index b601a654..25ab51f3 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -112,7 +112,6 @@ typedef struct TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 3314cef5..05a8f80c 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -861,9 +861,6 @@ prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, *parent_rri; Index parent_rt_idx; TupleTableSlot *result_slot; - EState *estate; - - estate = rps_storage->estate; /* We don't need to do anything ff there's no map */ if (!rri_holder->tuple_map) @@ -880,10 +877,6 @@ prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, parent_rri = rps_storage->base_rri; parent_rt_idx = parent_rri->ri_RangeTableIndex; - /* Create ExprContext for tuple projections */ - if (!pfstate->tup_convert_econtext) - pfstate->tup_convert_econtext = CreateExprContext(estate); - /* Replace parent's varattnos with child's */ returning_list = (List *) fix_returning_list_mutator((Node *) returning_list, @@ -899,7 +892,7 @@ prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, /* Build new projection info */ child_rri->ri_projectReturning = - ExecBuildProjectionInfoCompat(returning_list, pfstate->tup_convert_econtext, + ExecBuildProjectionInfoCompat(returning_list, pfstate->css.ss.ps.ps_ExprContext, result_slot, NULL /* HACK: no PlanState */, RelationGetDescr(child_rri->ri_RelationDesc)); } From 35b9be724cab347ac8d321a73ad5681f53f238b5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 18:53:23 +0300 Subject: [PATCH 256/528] minor changes in PartitionRouter --- src/partition_router.c | 37 
++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index 30ebd5d2..a87b514f 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -29,7 +29,6 @@ CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, - TupleTableSlot *planSlot, EPQState *epqstate, EState *estate); @@ -140,28 +139,27 @@ partition_router_exec(CustomScanState *node) if (!TupIsNull(slot)) { - ResultRelInfo *result_rri, - *parent_rri; - ItemPointer tupleid = NULL; - ItemPointerData tuple_ctid; + ResultRelInfo *new_rri, /* new tuple owner */ + *old_rri; /* previous tuple owner */ EPQState epqstate; PartitionFilterState *child_state; char relkind; + ItemPointerData ctid; + + ItemPointerSetInvalid(&ctid); child_state = (PartitionFilterState *) child_ps; Assert(child_state->command_type == CMD_UPDATE); - EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); - - parent_rri = child_state->result_parts.base_rri; - result_rri = estate->es_result_relation_info; + old_rri = child_state->result_parts.base_rri; + new_rri = estate->es_result_relation_info; /* Build new junkfilter if we have to */ if (state->junkfilter == NULL) { state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, - parent_rri->ri_RelationDesc->rd_att->tdhasoid, + old_rri->ri_RelationDesc->rd_att->tdhasoid, ExecInitExtraTupleSlot(estate)); state->junkfilter->jf_junkAttNo = @@ -171,7 +169,7 @@ partition_router_exec(CustomScanState *node) elog(ERROR, "could not find junk ctid column"); } - relkind = parent_rri->ri_RelationDesc->rd_rel->relkind; + relkind = old_rri->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { Datum ctid_datum; @@ -185,9 +183,8 @@ partition_router_exec(CustomScanState *node) if (ctid_isnull) elog(ERROR, "ctid is NULL"); - tupleid = (ItemPointer) DatumGetPointer(ctid_datum); - tuple_ctid = *tupleid; /* be sure we don't free ctid! 
*/ - tupleid = &tuple_ctid; + /* Get item pointer to tuple */ + ctid = *(ItemPointer) DatumGetPointer(ctid_datum); } else if (relkind == RELKIND_FOREIGN_TABLE) elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); @@ -202,14 +199,17 @@ partition_router_exec(CustomScanState *node) slot = ExecFilterJunk(state->junkfilter, slot); /* Magic: replace current ResultRelInfo with parent's one (DELETE) */ - estate->es_result_relation_info = parent_rri; + estate->es_result_relation_info = old_rri; - Assert(tupleid != NULL); - ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); + /* Delete tuple from old partition */ + Assert(ItemPointerIsValid(&ctid)); + EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); + ExecDeleteInternal(&ctid, &epqstate, estate); /* Magic: replace parent's ResultRelInfo with child's one (INSERT) */ - estate->es_result_relation_info = result_rri; + estate->es_result_relation_info = new_rri; + /* Tuple will be inserted by ModifyTable */ return slot; } @@ -246,7 +246,6 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - TupleTableSlot *planSlot, EPQState *epqstate, EState *estate) { From 99b7c027c0ed386d94b1015cdacd671ef9aa8cd6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Jun 2018 00:28:55 +0300 Subject: [PATCH 257/528] make sure that partition creation is visible --- src/include/partition_filter.h | 11 ++-- src/partition_creation.c | 4 ++ src/partition_filter.c | 94 +++++++++++++++++++++++----------- 3 files changed, 74 insertions(+), 35 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 25ab51f3..aa3a01e1 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,11 +40,12 @@ */ typedef struct { - Oid partid; /* partition's relid */ - ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ - TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ - bool has_children; /* hint that it might have children */ - ExprState *expr_state; /* children have their own expressions */ + Oid partid; /* partition's relid */ + ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ + TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + + PartRelationInfo *prel; /* this child might be a parent... */ + ExprState *prel_expr_state; /* and have its own part. expression */ } ResultRelInfoHolder; diff --git a/src/partition_creation.c b/src/partition_creation.c index 5de44ee4..1ddc39e1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -41,6 +41,7 @@ #include "utils/builtins.h" #include "utils/datum.h" #include "utils/fmgroids.h" +#include "utils/inval.h" #include "utils/jsonb.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" @@ -309,6 +310,9 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) elog(ERROR, "could not create new partitions for relation \"%s\"", get_rel_name_or_relid(relid)); + /* Make changes visible */ + AcceptInvalidationMessages(); + return last_partition; } diff --git a/src/partition_filter.c b/src/partition_filter.c index 05a8f80c..e100a2c9 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -233,6 +233,10 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) free_conversion_map(rri_holder->tuple_map); } + + /* Don't forget to close 'prel'! 
*/ + if (rri_holder->prel) + close_pathman_relation_info(rri_holder->prel); } /* Finally destroy hash table */ @@ -352,9 +356,18 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); - /* Are there subpartitions? */ - rri_holder->has_children = child_rel->rd_rel->relhassubclass; - rri_holder->expr_state = NULL; + /* Default values */ + rri_holder->prel = NULL; + rri_holder->prel_expr_state = NULL; + + if ((rri_holder->prel = get_pathman_relation_info(partid)) != NULL) + { + rri_holder->prel_expr_state = + prepare_expr_state(rri_holder->prel, /* NOTE: this prel! */ + parts_storage->base_rri->ri_RelationDesc, + parts_storage->estate, + parts_storage->command_type == CMD_UPDATE); + } /* Call initialization callback if needed */ if (parts_storage->init_rri_holder_cb) @@ -452,29 +465,41 @@ ResultRelInfoHolder * select_partition_for_insert(ResultPartsStorage *parts_storage, TupleTableSlot *slot) { - bool close_prel = false; PartRelationInfo *prel = parts_storage->prel; ExprState *expr_state = parts_storage->prel_expr_state; ExprContext *expr_context = parts_storage->prel_econtext; + Oid parent_relid = PrelParentRelid(prel), partition_relid = InvalidOid; + + Datum value; + bool isnull; + bool compute_value = true; + Oid *parts; int nparts; - bool isnull; - Datum value; ResultRelInfoHolder *result; - parts_storage->prel_econtext->ecxt_scantuple = slot; + do + { + if (compute_value) + { + /* Prepare expression context */ + ResetExprContext(expr_context); - /* Execute expression */ - value = ExecEvalExprCompat(expr_state, expr_context, - &isnull, mult_result_handler); + /* Execute expression */ + expr_context->ecxt_scantuple = slot; - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); + value = ExecEvalExprCompat(expr_state, expr_context, + &isnull, mult_result_handler); + + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); + + /* Ok, we have a value */ + compute_value = false; + } - do - { /* Search for matching partitions */ parts = find_partitions_for_value(value, prel->ev_type, prel, &nparts); @@ -492,28 +517,37 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, /* Get ResultRelationInfo holder for the selected partition */ result = scan_result_parts_storage(partition_relid, parts_storage); - /* Should we close 'prel'? */ - if (close_prel) - close_pathman_relation_info(prel); - - if (result == NULL || nparts == 0) + /* Somebody has dropped or created partitions */ + if (!PrelIsFresh(prel) && (nparts == 0 || result == NULL)) { - /* This partition has been dropped | we have a new one */ + /* Compare original and current Oids */ + Oid relid1 = PrelParentRelid(parts_storage->prel), + relid2 = PrelParentRelid(prel); + + /* Reopen 'prel' to make it fresh again */ + close_pathman_relation_info(prel); prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - /* Store new 'prel' in 'parts_storage' */ - close_pathman_relation_info(parts_storage->prel); - parts_storage->prel = prel; + /* Store new 'prel' */ + if (relid1 == relid2) + { + shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); + parts_storage->prel = prel; + } + else if (result && result->prel) + /* TODO: WTF? 
this is a new RRI, not the one we used before */ + result->prel = prel; } - else if (result->has_children) + + /* This partition is a parent itself */ + if (result && result->prel) { - /* This partition is a parent itself, repeat */ - prel = get_pathman_relation_info(partition_relid); - shout_if_prel_is_invalid(partition_relid, prel, PT_ANY); - close_prel = true; + prel = result->prel; + expr_state = result->prel_expr_state; + parent_relid = PrelParentRelid(prel); + compute_value = true; - /* We're not done yet */ + /* Repeat with a new dispatch */ result = NULL; } } From 379b686a0683c06d864e84e24a7040c0a6841f69 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Jun 2018 16:04:13 +0300 Subject: [PATCH 258/528] preserve PartRelationInfo for the lifetime of RuntimeAppend --- src/include/runtime_append.h | 3 ++- src/nodes_common.c | 19 +++++-------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/src/include/runtime_append.h b/src/include/runtime_append.h index 8e003a92..bc76ea70 100644 --- a/src/include/runtime_append.h +++ b/src/include/runtime_append.h @@ -44,8 +44,9 @@ typedef struct /* Refined clauses for partition pruning */ List *canon_custom_exprs; - /* Copy of partitioning expression (protect from invalidations) */ + /* Copy of partitioning expression and dispatch info */ Node *prel_expr; + PartRelationInfo *prel; /* All available plans \ plan states */ HTAB *children_table; diff --git a/src/nodes_common.c b/src/nodes_common.c index a370c165..3662fb6c 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -663,24 +663,20 @@ void begin_append_common(CustomScanState *node, EState *estate, int eflags) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - PartRelationInfo *prel; #if PG_VERSION_NUM < 100000 node->ss.ps.ps_TupFromTlist = false; #endif - prel = get_pathman_relation_info(scan_state->relid); - Assert(prel); + scan_state->prel = get_pathman_relation_info(scan_state->relid); + Assert(scan_state->prel); /* Prepare expression according to set_set_customscan_references() */ - scan_state->prel_expr = PrelExpressionForRelid(prel, INDEX_VAR); + scan_state->prel_expr = PrelExpressionForRelid(scan_state->prel, INDEX_VAR); /* Prepare custom expression according to set_set_customscan_references() */ scan_state->canon_custom_exprs = canonicalize_custom_exprs(scan_state->custom_exprs); - - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); } TupleTableSlot * @@ -756,6 +752,7 @@ end_append_common(CustomScanState *node) clear_plan_states(&scan_state->css); hash_destroy(scan_state->children_table); + close_pathman_relation_info(scan_state->prel); } void @@ -763,16 +760,13 @@ rescan_append_common(CustomScanState *node) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; ExprContext *econtext = node->ss.ps.ps_ExprContext; - PartRelationInfo *prel; + PartRelationInfo *prel = scan_state->prel; List *ranges; ListCell *lc; WalkerContext wcxt; Oid *parts; int nparts; - prel = get_pathman_relation_info(scan_state->relid); - Assert(prel); - /* First we select all available partitions... */ ranges = list_make1_irange_full(prel, IR_COMPLETE); @@ -804,9 +798,6 @@ rescan_append_common(CustomScanState *node) scan_state->ncur_plans, scan_state->css.ss.ps.state); - /* Don't forget to close 'prel'! 
*/
-	close_pathman_relation_info(prel);
-
 	scan_state->running_idx = 0;
 }

From d31d8829fe5c6dd0727d887c3a52f08f2b0b3b4d Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 26 Jun 2018 16:08:34 +0300
Subject: [PATCH 259/528] restore compatibility with 9.5

---
 src/include/compat/pg_compat.h | 9 +++++++++
 src/runtime_append.c           | 2 ++
 2 files changed, 11 insertions(+)

diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h
index 273e023c..6d9f1c21 100644
--- a/src/include/compat/pg_compat.h
+++ b/src/include/compat/pg_compat.h
@@ -723,6 +723,15 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc,
 #endif


+/*
+ * RegisterCustomScanMethods()
+ */
+#if PG_VERSION_NUM < 90600
+#define RegisterCustomScanMethods(methods)
+#endif
+
+
+
 /*
  * -------------
  * Common code
diff --git a/src/runtime_append.c b/src/runtime_append.c
index e73c5c7b..a90c101a 100644
--- a/src/runtime_append.c
+++ b/src/runtime_append.c
@@ -8,6 +8,8 @@
  * ------------------------------------------------------------------------
  */

+#include "compat/pg_compat.h"
+
 #include "runtime_append.h"

 #include "utils/guc.h"

From 159a9a229453efc7feba18b5545169fdaecb4a94 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 26 Jun 2018 19:06:21 +0300
Subject: [PATCH 260/528] introduce smart Array* wrappers

---
 src/include/utils.h | 26 ++++++++++++++++++++++++++
 src/init.c          | 11 ++---------
 src/nodes_common.c  | 45 ++++++++++++++-------------------------------
 3 files changed, 42 insertions(+), 40 deletions(-)

diff --git a/src/include/utils.h b/src/include/utils.h
index 885d4bfc..0697b923 100644
--- a/src/include/utils.h
+++ b/src/include/utils.h
@@ -31,6 +31,32 @@ bool match_expr_to_operand(const Node *expr, const Node *operand);
 Oid get_pathman_schema(void);
 List *list_reverse(List *l);

+/*
+ * Dynamic arrays.
+ */
+
+#define ARRAY_EXP 2
+
+#define ArrayAlloc(array, alloced, used, size) \
+	do { \
+		(array) = palloc((size) * sizeof(*(array))); \
+		(alloced) = (size); \
+		(used) = 0; \
+	} while (0)
+
+#define ArrayPush(array, alloced, used, value) \
+	do { \
+		if ((alloced) <= (used)) \
+		{ \
+			(alloced) = (alloced) * ARRAY_EXP + 1; \
+			(array) = repalloc((array), (alloced) * sizeof(*(array))); \
+		} \
+		\
+		(array)[(used)] = (value); \
+		\
+		(used)++; \
+	} while (0)
+
 /*
  * Useful functions for relations.
  */
diff --git a/src/init.c b/src/init.c
index d24b8ee6..2994aaf8 100644
--- a/src/init.c
+++ b/src/init.c
@@ -441,9 +441,7 @@ find_inheritance_children_array(Oid parent_relid,
 	/*
 	 * Scan pg_inherits and build a working array of subclass OIDs.
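	 *
	 * The ArrayAlloc/ArrayPush wrappers from utils.h (above) capture the
	 * usual alloc-check-grow pattern, as used just below:
	 *
	 *     ArrayAlloc(oidarr, maxoids, numoids, 32);
	 *     ArrayPush(oidarr, maxoids, numoids, inhrelid);
	 *
	 * ArrayPush grows the buffer geometrically (alloced * ARRAY_EXP + 1)
	 * whenever it fills up.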
*/ - maxoids = 32; - oidarr = (Oid *) palloc(maxoids * sizeof(Oid)); - numoids = 0; + ArrayAlloc(oidarr, maxoids, numoids, 32); relation = heap_open(InheritsRelationId, AccessShareLock); @@ -460,12 +458,7 @@ find_inheritance_children_array(Oid parent_relid, Oid inhrelid; inhrelid = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhrelid; - if (numoids >= maxoids) - { - maxoids *= 2; - oidarr = (Oid *) repalloc(oidarr, maxoids * sizeof(Oid)); - } - oidarr[numoids++] = inhrelid; + ArrayPush(oidarr, maxoids, numoids, inhrelid); } systable_endscan(scan); diff --git a/src/nodes_common.c b/src/nodes_common.c index 3662fb6c..f8721037 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -25,7 +25,6 @@ /* Allocation settings */ #define INITIAL_ALLOC_NUM 10 -#define ALLOC_EXP 2 /* Compare plans by 'original_order' */ @@ -92,12 +91,12 @@ transform_plans_into_states(RuntimeAppendState *scan_state, static ChildScanCommon * select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) { - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; + uint32 allocated, + used; ChildScanCommon *result; int i; - result = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); for (i = 0; i < nparts; i++) { @@ -107,13 +106,7 @@ select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) if (!child) continue; /* no plan for this partition */ - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - result = repalloc(result, allocated * sizeof(ChildScanCommon)); - } - - result[used++] = child; + ArrayPush(result, allocated, used, child); } /* Get rid of useless array */ @@ -418,11 +411,13 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, bool include_parent) { ListCell *range_cell; - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; - Oid *result = (Oid *) palloc(allocated * sizeof(Oid)); + uint32 allocated, + used; + Oid *result; Oid *children = PrelGetChildrenArray(prel); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); + /* If required, add parent to result */ Assert(INITIAL_ALLOC_NUM >= 1); if (include_parent) @@ -437,14 +432,8 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, for (i = a; i <= b; i++) { - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - result = repalloc(result, allocated * sizeof(Oid)); - } - Assert(i < PrelChildrenCount(prel)); - result[used++] = children[i]; + ArrayPush(result, allocated, used, children[i]); } } @@ -826,14 +815,14 @@ explain_append_common(CustomScanState *node, /* Construct excess PlanStates */ if (!es->analyze) { - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; + uint32 allocated, + used; ChildScanCommon *custom_ps, child; HASH_SEQ_STATUS seqstat; int i; - custom_ps = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(custom_ps, allocated, used, INITIAL_ALLOC_NUM); /* There can't be any nodes since we're not scanning anything */ Assert(!node->custom_ps); @@ -843,13 +832,7 @@ explain_append_common(CustomScanState *node, while ((child = (ChildScanCommon) hash_seq_search(&seqstat))) { - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - custom_ps = repalloc(custom_ps, allocated * sizeof(ChildScanCommon)); - } - - custom_ps[used++] = child; + ArrayPush(custom_ps, allocated, used, child); } /* From bd635df88cca4c5cf8340d8bfe7b237693206ac1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 16:06:07 +0300 Subject: [PATCH 
261/528] fix resowner for PartRelationInfo --- src/relation_info.c | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index e9b9245c..9c130a55 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -536,7 +536,7 @@ resowner_prel_add(PartRelationInfo *prel) /* Register this 'prel' */ old_mcxt = MemoryContextSwitchTo(TopPathmanContext); - info->prels = list_append_unique_ptr(info->prels, prel); + info->prels = lappend(info->prels, prel); MemoryContextSwitchTo(old_mcxt); #ifdef USE_RELINFO_LEAK_TRACKER @@ -576,9 +576,8 @@ resowner_prel_del(PartRelationInfo *prel) /* Check that 'prel' is registered! */ Assert(list_member_ptr(info->prels, prel)); - /* Remove it iff we're the only user */ - if (PrelReferenceCount(prel) == 1) - info->prels = list_delete_ptr(info->prels, prel); + /* Remove it from list */ + info->prels = list_delete_ptr(info->prels, prel); } /* Check that refcount is valid */ @@ -615,20 +614,7 @@ resonwner_prel_callback(ResourceReleasePhase phase, { PartRelationInfo *prel = lfirst(lc); - if (!isCommit) - { - /* Reset refcount for valid entry */ - if (PrelIsFresh(prel)) - { - PrelReferenceCount(prel) = 0; - } - /* Otherwise, free it when refcount is zero */ - else if (--PrelReferenceCount(prel) == 0) - { - free_pathman_relation_info(prel); - } - } - else + if (isCommit) { #ifdef USE_RELINFO_LEAK_TRACKER ListCell *lc; @@ -640,10 +626,22 @@ resonwner_prel_callback(ResourceReleasePhase phase, elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); } #endif - elog(ERROR, + elog(WARNING, "cache reference leak: PartRelationInfo(%d) has count %d", PrelParentRelid(prel), PrelReferenceCount(prel)); } + + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + + /* Decrease refcount */ + PrelReferenceCount(prel) -= 1; + + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } } list_free(info->prels); From 5f3b17d97ccd62fc6344f39115cda69e90ae429e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 16:30:31 +0300 Subject: [PATCH 262/528] fixes in PartitionFilter & select_partition_for_insert() --- src/include/partition_filter.h | 6 ++-- src/partition_filter.c | 64 ++++++++++++++++++---------------- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index aa3a01e1..0940a59f 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -164,8 +164,10 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, void fini_result_parts_storage(ResultPartsStorage *parts_storage); /* Find ResultRelInfo holder in storage */ -ResultRelInfoHolder * scan_result_parts_storage(Oid partid, - ResultPartsStorage *storage); +ResultRelInfoHolder * scan_result_parts_storage(ResultPartsStorage *storage, Oid partid); + +/* Refresh PartRelationInfo in storage */ +void refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); diff --git a/src/partition_filter.c b/src/partition_filter.c index e100a2c9..f64603cf 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -248,7 +248,7 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) /* Find a ResultRelInfo for the partition using ResultPartsStorage */ ResultRelInfoHolder * -scan_result_parts_storage(Oid 
partid, ResultPartsStorage *parts_storage) +scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) { #define CopyToResultRelInfo(field_name) \ ( child_result_rel_info->field_name = parts_storage->base_rri->field_name ) @@ -383,6 +383,32 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) return rri_holder; } +/* Refresh PartRelationInfo for the partition in storage */ +void +refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) +{ + if (partid == PrelParentRelid(parts_storage->prel)) + { + close_pathman_relation_info(parts_storage->prel); + parts_storage->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, parts_storage->prel, PT_ANY); + } + else + { + ResultRelInfoHolder *rri_holder; + + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_FIND, NULL); + + if (rri_holder && rri_holder->prel) + { + close_pathman_relation_info(rri_holder->prel); + rri_holder->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); + } + } +} /* Build tuple conversion map (e.g. parent has a dropped column) */ TupleConversionMap * @@ -486,10 +512,9 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { /* Prepare expression context */ ResetExprContext(expr_context); - - /* Execute expression */ expr_context->ecxt_scantuple = slot; + /* Execute expression */ value = ExecEvalExprCompat(expr_state, expr_context, &isnull, mult_result_handler); @@ -515,28 +540,13 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, else partition_relid = parts[0]; /* Get ResultRelationInfo holder for the selected partition */ - result = scan_result_parts_storage(partition_relid, parts_storage); + result = scan_result_parts_storage(parts_storage, partition_relid); /* Somebody has dropped or created partitions */ - if (!PrelIsFresh(prel) && (nparts == 0 || result == NULL)) + if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) { - /* Compare original and current Oids */ - Oid relid1 = PrelParentRelid(parts_storage->prel), - relid2 = PrelParentRelid(prel); - - /* Reopen 'prel' to make it fresh again */ - close_pathman_relation_info(prel); - prel = get_pathman_relation_info(parent_relid); - - /* Store new 'prel' */ - if (relid1 == relid2) - { - shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - parts_storage->prel = prel; - } - else if (result && result->prel) - /* TODO: WTF? 
this is a new RRI, not the one we used before */ - result->prel = prel; + /* Try building a new 'prel' for this relation */ + refresh_result_parts_storage(parts_storage, parent_relid); } /* This partition is a parent itself */ @@ -544,7 +554,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { prel = result->prel; expr_state = result->prel_expr_state; - parent_relid = PrelParentRelid(prel); + parent_relid = result->partid; compute_value = true; /* Repeat with a new dispatch */ @@ -735,14 +745,9 @@ partition_filter_exec(CustomScanState *node) if (!TupIsNull(slot)) { MemoryContext old_mcxt; - PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; ResultRelInfo *resultRelInfo; - /* Fetch PartRelationInfo for this partitioned relation */ - if ((prel = get_pathman_relation_info(state->partitioned_table)) == NULL) - return slot; /* table is not partitioned anymore */ - /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -779,9 +784,6 @@ partition_filter_exec(CustomScanState *node) slot = state->tup_convert_slot; } - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); - return slot; } From 8995eb851cb27b2850f613e7bd65a8ca5d46b4c3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 17:20:35 +0300 Subject: [PATCH 263/528] improved leak tracker --- src/include/relation_info.h | 1 + src/relation_info.c | 82 +++++++++++++++++++++++++++---------- 2 files changed, 62 insertions(+), 21 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index ee4e9a35..6c1d5435 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -223,6 +223,7 @@ typedef struct PartRelationInfo #ifdef USE_RELINFO_LEAK_TRACKER List *owners; /* saved callers of get_pathman_relation_info() */ + uint64 access_total; /* total amount of accesses to this entry */ #endif MemoryContext mcxt; /* memory context holding this struct */ diff --git a/src/relation_info.c b/src/relation_info.c index 9c130a55..ef170b58 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -58,8 +58,48 @@ #ifdef USE_RELINFO_LEAK_TRACKER #undef get_pathman_relation_info #undef close_pathman_relation_info + const char *prel_resowner_function = NULL; int prel_resowner_line = 0; + +#define LeakTrackerAdd(prel) \ + do { \ + MemoryContext old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ + (prel)->owners = \ + list_append_unique( \ + (prel)->owners, \ + list_make2(makeString((char *) prel_resowner_function), \ + makeInteger(prel_resowner_line))); \ + MemoryContextSwitchTo(old_mcxt); \ + \ + (prel)->access_total++; \ + } while (0) + +#define LeakTrackerPrint(prel) \ + do { \ + ListCell *lc; \ + foreach (lc, (prel)->owners) \ + { \ + char *fun = strVal(linitial(lfirst(lc))); \ + int line = intVal(lsecond(lfirst(lc))); \ + elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); \ + } \ + } while (0) + +#define LeakTrackerFree(prel) \ + do { \ + ListCell *lc; \ + foreach (lc, (prel)->owners) \ + { \ + list_free_deep(lfirst(lc)); \ + } \ + list_free((prel)->owners); \ + (prel)->owners = NIL; \ + } while (0) +#else +#define LeakTrackerAdd(prel) +#define LeakTrackerPrint(prel) +#define LeakTrackerFree(prel) #endif @@ -256,11 +296,9 @@ invalidate_psin_entry(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { - (void) resowner_prel_del(prel); + AssertArg(prel); - /* Remove entry is it's outdated and we're the last user */ - if (PrelReferenceCount(prel) == 0 && 
!PrelIsFresh(prel)) - free_pathman_relation_info(prel); + (void) resowner_prel_del(prel); } /* Check if relation is partitioned by pg_pathman */ @@ -539,14 +577,8 @@ resowner_prel_add(PartRelationInfo *prel) info->prels = lappend(info->prels, prel); MemoryContextSwitchTo(old_mcxt); -#ifdef USE_RELINFO_LEAK_TRACKER /* Save current caller (function:line) */ - old_mcxt = MemoryContextSwitchTo(prel->mcxt); - prel->owners = lappend(prel->owners, - list_make2(makeString((char *) prel_resowner_function), - makeInteger(prel_resowner_line))); - MemoryContextSwitchTo(old_mcxt); -#endif + LeakTrackerAdd(prel); /* Finally, increment refcount */ PrelReferenceCount(prel) += 1; @@ -583,8 +615,20 @@ resowner_prel_del(PartRelationInfo *prel) /* Check that refcount is valid */ Assert(PrelReferenceCount(prel) > 0); - /* Finally, decrement refcount */ + /* Decrease refcount */ PrelReferenceCount(prel) -= 1; + + /* Free list of owners */ + if (PrelReferenceCount(prel) == 0) + { + LeakTrackerFree(prel); + } + + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } } return prel; @@ -616,16 +660,9 @@ resonwner_prel_callback(ResourceReleasePhase phase, if (isCommit) { -#ifdef USE_RELINFO_LEAK_TRACKER - ListCell *lc; + /* Print verbose list of *possible* owners */ + LeakTrackerPrint(prel); - foreach (lc, prel->owners) - { - char *fun = strVal(linitial(lfirst(lc))); - int line = intVal(lsecond(lfirst(lc))); - elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); - } -#endif elog(WARNING, "cache reference leak: PartRelationInfo(%d) has count %d", PrelParentRelid(prel), PrelReferenceCount(prel)); @@ -637,6 +674,9 @@ resonwner_prel_callback(ResourceReleasePhase phase, /* Decrease refcount */ PrelReferenceCount(prel) -= 1; + /* Free list of owners */ + LeakTrackerFree(prel); + /* Free this entry if it's time */ if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) { From 718627e781a9ab77991e52bcc2577cb83ef82418 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 18:57:12 +0300 Subject: [PATCH 264/528] fix expression evaluation in PartitionFilter --- src/partition_filter.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index f64603cf..87facbc0 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -71,8 +71,7 @@ CustomExecMethods partition_filter_exec_methods; static ExprState *prepare_expr_state(const PartRelationInfo *prel, Relation source_rel, - EState *estate, - bool try_map); + EState *estate); static void prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, const ResultPartsStorage *rps_storage); @@ -195,8 +194,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, /* Build a partitioning expression state */ parts_storage->prel_expr_state = prepare_expr_state(parts_storage->prel, parts_storage->base_rri->ri_RelationDesc, - parts_storage->estate, - cmd_type == CMD_UPDATE); + parts_storage->estate); /* Build expression context */ parts_storage->prel_econtext = CreateExprContext(parts_storage->estate); @@ -365,8 +363,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) rri_holder->prel_expr_state = prepare_expr_state(rri_holder->prel, /* NOTE: this prel! 
*/ parts_storage->base_rri->ri_RelationDesc, - parts_storage->estate, - parts_storage->command_type == CMD_UPDATE); + parts_storage->estate); } /* Call initialization callback if needed */ @@ -570,8 +567,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, static ExprState * prepare_expr_state(const PartRelationInfo *prel, Relation source_rel, - EState *estate, - bool try_map) + EState *estate) { ExprState *expr_state; MemoryContext old_mcxt; @@ -584,9 +580,8 @@ prepare_expr_state(const PartRelationInfo *prel, expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); /* Should we try using map? */ - if (try_map) + if (PrelParentRelid(prel) != RelationGetRelid(source_rel)) { - AttrNumber *map; int map_length; TupleDesc source_tupdesc = RelationGetDescr(source_rel); From 92f587828fa333d43ffe479c0b9f3bffaa255a25 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 19:09:05 +0300 Subject: [PATCH 265/528] rewrite some of subpartitioning tests --- expected/pathman_subpartitions.out | 193 ++++++++++++----------------- sql/pathman_subpartitions.sql | 73 ++++++----- 2 files changed, 112 insertions(+), 154 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 27be6b1e..924d1bde 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -237,7 +237,7 @@ SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 2 (1 row) INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ tableoid | a | b -----------------------+----+---- subpartitions.abc_1_1 | 25 | 25 @@ -253,7 +253,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio (10 rows) UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_1 | 125 | 25 @@ -269,7 +269,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio (10 rows) UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_2 | 125 | 75 @@ -285,7 +285,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio (10 rows) UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ tableoid | a | b -----------------------+-----+----- subpartitions.abc_2_3 | 125 | 125 @@ -301,9 +301,9 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti (10 rows) /* split_range_partition */ -SELECT split_range_partition('subpartitions.abc_2', 150); -ERROR: could not split partition if it has children -SELECT split_range_partition('subpartitions.abc_2_2', 75); +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT 
split_range_partition('subpartitions.abc_2_2', 75); /* OK */ split_range_partition ----------------------- {50,100} @@ -324,144 +324,103 @@ SELECT subpartitions.partitions_tree('subpartitions.abc'); (9 rows) /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ append_range_partition ------------------------ subpartitions.abc_3 (1 row) -select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); -ERROR: cannot merge partitions -select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ merge_range_partitions ------------------------ (1 row) -DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 11 other objects -/* subpartitions on same expressions */ -CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); -INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); - create_range_partitions -------------------------- - 4 -(1 row) +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) -SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ - create_range_partitions -------------------------- - 9 +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + (1 row) -SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ -WARNING: "start_value" was set to 100 +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); create_range_partitions ------------------------- - 8 + 2 (1 row) -SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ -WARNING: "p_interval" is not multiple of range (200, 310) -NOTICE: "p_count" was limited to 10 - create_range_partitions +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition ------------------------- - 10 + subpartitions.abc_3 (1 row) -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ -ERROR: Bounds should start from 300 -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ -ERROR: Lower bound of rightmost partition should be less than 400 -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); create_range_partitions 
------------------------- - 2 + 3 (1 row) -SELECT * FROM pathman_partition_list; - parent | partition | parttype | expr | range_min | range_max ----------------------+------------------------+----------+------+-----------+----------- - subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 - subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 - subpartitions.abc | subpartitions.abc_3 | 2 | a | 200 | 300 - subpartitions.abc | subpartitions.abc_4 | 2 | a | 300 | 400 - subpartitions.abc_1 | subpartitions.abc_1_1 | 2 | a | 0 | 11 - subpartitions.abc_1 | subpartitions.abc_1_2 | 2 | a | 11 | 22 - subpartitions.abc_1 | subpartitions.abc_1_3 | 2 | a | 22 | 33 - subpartitions.abc_1 | subpartitions.abc_1_4 | 2 | a | 33 | 44 - subpartitions.abc_1 | subpartitions.abc_1_5 | 2 | a | 44 | 55 - subpartitions.abc_1 | subpartitions.abc_1_6 | 2 | a | 55 | 66 - subpartitions.abc_1 | subpartitions.abc_1_7 | 2 | a | 66 | 77 - subpartitions.abc_1 | subpartitions.abc_1_8 | 2 | a | 77 | 88 - subpartitions.abc_1 | subpartitions.abc_1_9 | 2 | a | 88 | 99 - subpartitions.abc_2 | subpartitions.abc_2_1 | 2 | a | 100 | 111 - subpartitions.abc_2 | subpartitions.abc_2_2 | 2 | a | 111 | 122 - subpartitions.abc_2 | subpartitions.abc_2_3 | 2 | a | 122 | 133 - subpartitions.abc_2 | subpartitions.abc_2_4 | 2 | a | 133 | 144 - subpartitions.abc_2 | subpartitions.abc_2_5 | 2 | a | 144 | 155 - subpartitions.abc_2 | subpartitions.abc_2_6 | 2 | a | 155 | 166 - subpartitions.abc_2 | subpartitions.abc_2_7 | 2 | a | 166 | 177 - subpartitions.abc_2 | subpartitions.abc_2_8 | 2 | a | 177 | 188 - subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | a | 200 | 211 - subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | a | 211 | 222 - subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | a | 222 | 233 - subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | a | 233 | 244 - subpartitions.abc_3 | subpartitions.abc_3_5 | 2 | a | 244 | 255 - subpartitions.abc_3 | subpartitions.abc_3_6 | 2 | a | 255 | 266 - subpartitions.abc_3 | subpartitions.abc_3_7 | 2 | a | 266 | 277 - subpartitions.abc_3 | subpartitions.abc_3_8 | 2 | a | 277 | 288 - subpartitions.abc_3 | subpartitions.abc_3_9 | 2 | a | 288 | 299 - subpartitions.abc_3 | subpartitions.abc_3_10 | 2 | a | 299 | 310 - subpartitions.abc_4 | subpartitions.abc_4_1 | 2 | a | 300 | 350 - subpartitions.abc_4 | subpartitions.abc_4_2 | 2 | a | 350 | 450 -(33 rows) - -SELECT append_range_partition('subpartitions.abc_1'::regclass); - append_range_partition ------------------------- - subpartitions.abc_1_10 +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 (1 row) -SELECT append_range_partition('subpartitions.abc_1'::regclass); -ERROR: reached upper bound in the current level of subpartitions -DROP TABLE subpartitions.abc_1_10; -/* detach_range_partition */ -SELECt detach_range_partition('subpartitions.abc_1'); -ERROR: could not detach partition if it has children -/* attach_range_partition */ -CREATE TABLE subpartitions.abc_c(LIKE subpartitions.abc_1 INCLUDING ALL); -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 98, 110); /* fail */ -ERROR: specified range [98, 110) overlaps with existing partitions -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ -ERROR: "start value" exceeds upper bound of the current level of subpartitions -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 
110); /* ok */ - attach_range_partition ------------------------- - subpartitions.abc_c -(1 row) +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) -DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 39 other objects -/* subpartitions on same expression but dates */ -CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); - create_range_partitions -------------------------- - 6 -(1 row) +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) -SELECT create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, - '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ -WARNING: "start_value" was set to 10-02-2017 -WARNING: "p_interval" is not multiple of range (10-02-2017, 11-03-2017) -NOTICE: "p_count" was limited to 1 - create_range_partitions -------------------------- - 1 -(1 row) +SET pg_pathman.enable_partitionrouter = ON; +UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 9 other objects diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 7f38f629..b790c20e 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -86,61 +86,60 @@ SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ UPDATE subpartitions.abc SET b = 125 WHERE 
a = 125 and b = 75; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + /* split_range_partition */ -SELECT split_range_partition('subpartitions.abc_2', 150); -SELECT split_range_partition('subpartitions.abc_2_2', 75); +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ SELECT subpartitions.partitions_tree('subpartitions.abc'); + /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ -select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); -select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); -DROP TABLE subpartitions.abc CASCADE; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ +INSERT INTO subpartitions.abc VALUES (250, 50); -/* subpartitions on same expressions */ -CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); -INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); -SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ -SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ -SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ -SELECT * FROM pathman_partition_list; -SELECT append_range_partition('subpartitions.abc_1'::regclass); -SELECT append_range_partition('subpartitions.abc_1'::regclass); -DROP TABLE subpartitions.abc_1_10; +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; -/* detach_range_partition */ -SELECt detach_range_partition('subpartitions.abc_1'); +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; -/* attach_range_partition */ -CREATE TABLE subpartitions.abc_c(LIKE subpartitions.abc_1 INCLUDING ALL); -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 98, 110); /* fail */ -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 110); /* ok */ DROP TABLE subpartitions.abc CASCADE; -/* subpartitions on same expression but dates */ -CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); -SELECT create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, - '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ + +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not 
null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + +SET pg_pathman.enable_partitionrouter = ON; +UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; + + DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; From 6afa610c8b20c98b9fc85e078786f481f6f30d3e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 2 Jul 2018 15:01:25 +0300 Subject: [PATCH 266/528] attempt to fix performance issues described in issue #164 --- expected/pathman_rebuild_updates.out | 8 +- src/include/compat/pg_compat.h | 1 + src/pg_pathman.c | 1 - src/planner_tree_modification.c | 224 ++++++++++++++++++--------- 4 files changed, 153 insertions(+), 81 deletions(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index d06f7c5b..297089af 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -43,14 +43,10 @@ UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; QUERY PLAN ----------------------------- - Update on test - Update on test - Update on test_11 - -> Seq Scan on test - Filter: (val = 101) + Update on test_11 -> Seq Scan on test_11 Filter: (val = 101) -(7 rows) +(3 rows) UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; val | b | tableoid diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 61d1ab1f..8632578e 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -29,6 +29,7 @@ #include "nodes/pg_list.h" #include "optimizer/cost.h" #include "optimizer/paths.h" +#include "optimizer/prep.h" #include "utils/memutils.h" /* diff --git a/src/pg_pathman.c b/src/pg_pathman.c index c4adef6e..8cb2ee9c 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -26,7 +26,6 @@ #include "miscadmin.h" #include "optimizer/clauses.h" #include "optimizer/plancat.h" -#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" #include "utils/datum.h" diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 3225e59e..45ec1b0f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -94,6 +94,13 @@ typedef struct } transform_query_cxt; +typedef struct +{ + Index child_varno; + List *translated_vars; +} adjust_appendrel_varnos_cxt; + + static bool pathman_transform_query_walker(Node *node, void *context); @@ -103,6 +110,7 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); +static bool adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); /* @@ -366,20 +374,20 @@ handle_modification_query(Query *parse, transform_query_cxt *context) WrapperNode *wrap; Expr *expr; 
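 	/* (result_rel becomes result_rti below: it holds a range-table index, not a relation) */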
WalkerContext wcxt; - Index result_rel; + Index result_rti; int num_selected; ParamListInfo params; /* Fetch index of result relation */ - result_rel = parse->resultRelation; + result_rti = parse->resultRelation; /* Exit if it's not a DELETE or UPDATE query */ - if (result_rel == 0 || + if (result_rti == 0 || (parse->commandType != CMD_UPDATE && parse->commandType != CMD_DELETE)) return; - rte = rt_fetch(result_rel, parse->rtable); + rte = rt_fetch(result_rti, parse->rtable); /* Exit if it's DELETE FROM ONLY table */ if (!rte->inh) return; @@ -406,7 +414,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) expr = (Expr *) eval_extern_params_mutator((Node *) expr, params); /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, result_rel); + prel_expr = PrelExpressionForRelid(prel, result_rti); /* Parse syntax tree and extract partition ranges */ InitWalkerContext(&wcxt, prel_expr, prel, NULL); @@ -430,13 +438,14 @@ handle_modification_query(Query *parse, transform_query_cxt *context) Relation child_rel, parent_rel; - void *tuple_map; /* we don't need the map itself */ - LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ HeapTuple syscache_htup; char child_relkind; + List *translated_vars; + adjust_appendrel_varnos_cxt aav_cxt; + /* Lock 'child' table */ LockRelationOid(child, lockmode); @@ -460,19 +469,23 @@ handle_modification_query(Query *parse, transform_query_cxt *context) child_rel = heap_open(child, NoLock); parent_rel = heap_open(parent, NoLock); - /* Build a conversion map (may be trivial, i.e. NULL) */ - tuple_map = build_part_tuple_map(parent_rel, child_rel); - if (tuple_map) - free_conversion_map((TupleConversionMap *) tuple_map); + make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + + /* Translate varnos for this child */ + aav_cxt.child_varno = result_rti; + aav_cxt.translated_vars = translated_vars; + if (adjust_appendrel_varnos((Node *) parse, &aav_cxt)) + return; /* failed to perform rewrites */ + + /* Translate column privileges for this child */ + rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); + rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); + rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); /* Close relations (should remain locked, though) */ heap_close(child_rel, NoLock); heap_close(parent_rel, NoLock); - /* Exit if tuple map was NOT trivial */ - if (tuple_map) /* just checking the pointer! */ - return; - /* Update RTE's relid and relkind (for FDW) */ rte->relid = child; rte->relkind = child_relkind; @@ -490,6 +503,128 @@ handle_modification_query(Query *parse, transform_query_cxt *context) } } +/* Replace extern param nodes with consts */ +static Node * +eval_extern_params_mutator(Node *node, ParamListInfo params) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Param)) + { + Param *param = (Param *) node; + + Assert(params); + + /* Look to see if we've been given a value for this Param */ + if (param->paramkind == PARAM_EXTERN && + param->paramid > 0 && + param->paramid <= params->numParams) + { + ParamExternData *prm = ¶ms->params[param->paramid - 1]; + + if (OidIsValid(prm->ptype)) + { + /* OK to substitute parameter value? */ + if (prm->pflags & PARAM_FLAG_CONST) + { + /* + * Return a Const representing the param value. + * Must copy pass-by-ref datatypes, since the + * Param might be in a memory context + * shorter-lived than our output plan should be. 
+ */ + int16 typLen; + bool typByVal; + Datum pval; + + Assert(prm->ptype == param->paramtype); + get_typlenbyval(param->paramtype, + &typLen, &typByVal); + if (prm->isnull || typByVal) + pval = prm->value; + else + pval = datumCopy(prm->value, typByVal, typLen); + return (Node *) makeConst(param->paramtype, + param->paramtypmod, + param->paramcollid, + (int) typLen, + pval, + prm->isnull, + typByVal); + } + } + } + } + + return expression_tree_mutator(node, eval_extern_params_mutator, + (void *) params); +} + +static bool +adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) +{ + if (node == NULL) + return false; + + if (IsA(node, Query)) + { + Query *query = (Query *) node; + ListCell *lc; + + foreach (lc, query->targetList) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *child_var; + + if (te->resjunk) + continue; + + if (te->resno > list_length(context->translated_vars)) + return true; + + child_var = list_nth(context->translated_vars, te->resno - 1); + if (!child_var) + return true; + + /* Transform attribute number */ + te->resno = child_var->varattno; + } + + return query_tree_walker((Query *) node, + adjust_appendrel_varnos, + context, + QTW_IGNORE_RC_SUBQUERIES); + } + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + + /* Don't tranform system columns & other relations' Vars */ + if (var->varoattno > 0 && var->varno == context->child_varno) + { + Var *child_var; + + if (var->varattno > list_length(context->translated_vars)) + return true; + + child_var = list_nth(context->translated_vars, var->varattno - 1); + if (!child_var) + return true; + + /* Transform attribute number */ + var->varattno = child_var->varattno; + } + + return false; + } + + return expression_tree_walker(node, + adjust_appendrel_varnos, + context); +} + /* * ------------------------------- @@ -592,65 +727,6 @@ get_rel_parenthood_status(RangeTblEntry *rte) } -/* Replace extern param nodes with consts */ -static Node * -eval_extern_params_mutator(Node *node, ParamListInfo params) -{ - if (node == NULL) - return NULL; - - if (IsA(node, Param)) - { - Param *param = (Param *) node; - - Assert(params); - - /* Look to see if we've been given a value for this Param */ - if (param->paramkind == PARAM_EXTERN && - param->paramid > 0 && - param->paramid <= params->numParams) - { - ParamExternData *prm = ¶ms->params[param->paramid - 1]; - - if (OidIsValid(prm->ptype)) - { - /* OK to substitute parameter value? */ - if (prm->pflags & PARAM_FLAG_CONST) - { - /* - * Return a Const representing the param value. - * Must copy pass-by-ref datatypes, since the - * Param might be in a memory context - * shorter-lived than our output plan should be. 
- */
-					int16		typLen;
-					bool		typByVal;
-					Datum		pval;
-
-					Assert(prm->ptype == param->paramtype);
-					get_typlenbyval(param->paramtype,
-									&typLen, &typByVal);
-					if (prm->isnull || typByVal)
-						pval = prm->value;
-					else
-						pval = datumCopy(prm->value, typByVal, typLen);
-					return (Node *) makeConst(param->paramtype,
-											  param->paramtypmod,
-											  param->paramcollid,
-											  (int) typLen,
-											  pval,
-											  prm->isnull,
-											  typByVal);
-				}
-			}
-		}
-	}
-
-	return expression_tree_mutator(node, eval_extern_params_mutator,
-								   (void *) params);
-}
-
-
 /*
  * -----------------------------------------------
  * Count number of times we've visited planner()

From edf46f91a679ff467f65896f6f9a26029ebda118 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 2 Jul 2018 15:28:17 +0300
Subject: [PATCH 267/528] small fixes

---
 src/planner_tree_modification.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index 45ec1b0f..c4b4073d 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -93,15 +93,14 @@ typedef struct
 	CommonTableExpr *parent_cte;
 } transform_query_cxt;

-
 typedef struct
 {
 	Index		child_varno;
+	Oid			parent_relid;
 	List	   *translated_vars;
 } adjust_appendrel_varnos_cxt;

-
 static bool pathman_transform_query_walker(Node *node, void *context);

 static void disable_standard_inheritance(Query *parse, transform_query_cxt *context);

@@ -473,9 +472,9 @@ handle_modification_query(Query *parse, transform_query_cxt *context)

 	/* Translate varnos for this child */
 	aav_cxt.child_varno = result_rti;
+	aav_cxt.parent_relid = parent;
 	aav_cxt.translated_vars = translated_vars;
-	if (adjust_appendrel_varnos((Node *) parse, &aav_cxt))
-		return; /* failed to perform rewrites */
+	adjust_appendrel_varnos((Node *) parse, &aav_cxt);

 	/* Translate column privileges for this child */
 	rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars);
@@ -561,6 +560,7 @@ eval_extern_params_mutator(Node *node, ParamListInfo params)
 						   (void *) params);
 }

+/* Remap parent's attributes to child ones */
 static bool
 adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context)
 {
@@ -572,6 +572,7 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context)
 		Query	   *query = (Query *) node;
 		ListCell   *lc;

+		/* FIXME: we might need to reorder TargetEntries */
 		foreach (lc, query->targetList)
 		{
 			TargetEntry *te = (TargetEntry *) lfirst(lc);
@@ -581,11 +582,13 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context)
 				continue;

 			if (te->resno > list_length(context->translated_vars))
-				return true;
+				elog(ERROR, "attribute %d of relation \"%s\" does not exist",
+					 te->resno, get_rel_name(context->parent_relid));

 			child_var = list_nth(context->translated_vars, te->resno - 1);
 			if (!child_var)
-				return true;
+				elog(ERROR, "attribute %d of relation \"%s\" does not exist",
+					 te->resno, get_rel_name(context->parent_relid));

 			/* Transform attribute number */
 			te->resno = child_var->varattno;
@@ -601,17 +604,19 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context)
 	{
 		Var *var = (Var *) node;

-		/* Don't tranform system columns & other relations' Vars */
+		/* Don't transform system columns & other relations' Vars */
 		if (var->varoattno > 0 && var->varno == context->child_varno)
 		{
 			Var *child_var;

 			if (var->varattno > list_length(context->translated_vars))
-				return true;
+				elog(ERROR, "attribute %d of relation \"%s\" does not exist",
+					 var->varattno,
get_rel_name(context->parent_relid)); child_var = list_nth(context->translated_vars, var->varattno - 1); if (!child_var) - return true; + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); /* Transform attribute number */ var->varattno = child_var->varattno; From da4c916300b9fbf07e033f306c91292dd2ffce39 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 2 Jul 2018 16:15:42 +0300 Subject: [PATCH 268/528] don't use 'varoattno' where possible --- src/nodes_common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index 7a4b71fe..e8d056bf 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -174,7 +174,7 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) elog(ERROR, "table \"%s\" has no attribute %d of partition \"%s\"", get_rel_name_or_relid(appinfo->parent_relid), - tlist_var->varoattno, + tlist_var->varattno, get_rel_name_or_relid(appinfo->child_relid)); } @@ -232,7 +232,7 @@ append_part_attr_to_tlist(List *tlist, TargetEntry *te = (TargetEntry *) lfirst(lc); Var *var = (Var *) te->expr; - if (IsA(var, Var) && var->varoattno == child_var->varattno) + if (IsA(var, Var) && var->varattno == child_var->varattno) { part_attr_found = true; break; From 6c4f5964f276386acfaa6323df76fe7ac99eeef5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 2 Jul 2018 19:27:02 +0300 Subject: [PATCH 269/528] attempt to fix issue #165 --- expected/pathman_views.out | 68 +++++++++++++++++++++++++++++++++++- expected/pathman_views_1.out | 68 +++++++++++++++++++++++++++++++++++- sql/pathman_views.sql | 15 ++++++++ src/hooks.c | 7 ++-- 4 files changed, 154 insertions(+), 4 deletions(-) diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 2341919a..45423ef5 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -16,6 +16,9 @@ select create_hash_partitions('views._abc', 'id', 10); (1 row) insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; /* create a facade view */ create view views.abc as select * from views._abc; create or replace function views.disable_modification() @@ -117,6 +120,69 @@ explain (costs off) delete from views.abc where id = 1 or id = 2; delete from views.abc where id = 1 or id = 2; ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq 
Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 16 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index fdf19f28..bead6de1 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -16,6 +16,9 @@ select create_hash_partitions('views._abc', 'id', 10); (1 row) insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; /* create a facade view */ create view views.abc as select * from views._abc; create or replace function views.disable_modification() @@ -173,6 +176,69 @@ explain (costs off) delete from views.abc where id = 1 or id = 2; delete from views.abc where id = 1 or id = 2; ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 16 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql index 90118fe0..9f386a3d 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ -17,6 +17,12 @@ create table views._abc(id int4 not null); select create_hash_partitions('views._abc', 'id', 10); insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); + + +vacuum analyze; + /* create a facade view */ create view views.abc as select * from views._abc; @@ -60,6 +66,15 @@ explain (costs off) delete from views.abc where id = 1 or id = 2; delete from views.abc where id = 1 or id = 2; +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc 
union table views._abc_add;
+create view views.abc_union_all as table views._abc union all table views._abc_add;
+explain (costs off) table views.abc_union;
+explain (costs off) select * from views.abc_union where id = 5;
+explain (costs off) table views.abc_union_all;
+explain (costs off) select * from views.abc_union_all where id = 5;
+
+
 DROP SCHEMA views CASCADE;
 DROP EXTENSION pg_pathman;

diff --git a/src/hooks.c b/src/hooks.c
index 96efad08..d78d1943 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -384,11 +384,14 @@ pathman_rel_pathlist_hook(PlannerInfo *root,
 			AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);

 			/*
-			 * If there's an 'appinfo', it means that somebody
+			 * If there's an 'appinfo' with a valid parent Oid, it means that somebody
 			 * (PG?) has already processed this partitioned table
 			 * and added its children to the plan.
+			 *
+			 * NOTE: 'parent_reloid' is InvalidOid iff the child comes from a UNION.
 			 */
-			if (appinfo->child_relid == rti)
+			if (appinfo->child_relid == rti &&
+				OidIsValid(appinfo->parent_reloid))
 				return;
 		}
 	}

From a6f968ca3ebc0e0470a73967f73856ab723668b1 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Tue, 3 Jul 2018 13:35:14 +0300
Subject: [PATCH 270/528] use VARIADIC in merge_range_partitions()

---
 README.md                          |  9 ++-------
 expected/pathman_basic.out         |  8 ++++----
 expected/pathman_calamity.out      |  8 ++++----
 expected/pathman_domains.out       |  2 +-
 expected/pathman_subpartitions.out |  4 ++--
 range.sql                          | 16 ++--------------
 sql/pathman_calamity.sql           |  8 ++++----
 src/pl_range_funcs.c               | 14 +++++++-------
 8 files changed, 26 insertions(+), 43 deletions(-)

diff --git a/README.md b/README.md
index c83df46c..c89d99de 100644
--- a/README.md
+++ b/README.md
@@ -203,14 +203,9 @@ split_range_partition(partition REGCLASS,
 Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available.

 ```plpgsql
-merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS)
+merge_range_partitions(variadic partitions REGCLASS[])
 ```
-Merge two adjacent RANGE partitions. First, data from `partition2` is copied to `partition1`, then `partition2` is removed.
-
-```plpgsql
-merge_range_partitions(partitions REGCLASS[])
-```
-Merge several adjacent RANGE partitions (partitions must be specified in ascending or descending order). All the data will be accumulated in the first partition.
+Merge several adjacent RANGE partitions. Partitions are automatically ordered by increasing bounds; all the data will be accumulated in the first partition.
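+
+For example (partition names below are purely illustrative), merging three
+adjacent partitions of a table partitioned by `create_range_partitions`:
+
+```plpgsql
+SELECT merge_range_partitions('parts_1', 'parts_2', 'parts_3');
+```
+
+The call returns the surviving partition (`parts_1` in this sketch).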
```plpgsql append_range_partition(parent REGCLASS, diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 1bdbcef9..b4b062d3 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -933,7 +933,7 @@ SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); merge_range_partitions ------------------------ - + test.num_range_rel_1 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; @@ -947,7 +947,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) /* Append and prepend partitions */ @@ -1524,7 +1524,7 @@ SELECT pathman.prepend_range_partition('test."RangeRel"'); SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); merge_range_partitions ------------------------ - + test."RangeRel_1" (1 row) SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); @@ -1594,7 +1594,7 @@ SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 mo SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 14ff9cd6..2889cc80 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -746,9 +746,9 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ DROP TABLE calamity.test_range_oid CASCADE; NOTICE: drop cascades to 2 other objects /* check function merge_range_partitions() */ -SELECT merge_range_partitions('{pg_class}'); /* not ok */ +SELECT merge_range_partitions('pg_class'); /* not ok */ ERROR: cannot merge partitions -SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ ERROR: cannot merge partitions CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); @@ -764,8 +764,8 @@ SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); 2 (1 row) -SELECT merge_range_partitions('{calamity.merge_test_a_1, - calamity.merge_test_b_1}'); /* not ok */ +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index e5e882c0..e6fc43fe 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -41,7 +41,7 @@ SELECT prepend_range_partition('domains.dom_table'); SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); merge_range_partitions ------------------------ - + domains.dom_table_1 (1 row) SELECT split_range_partition('domains.dom_table_1', 50); diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 924d1bde..c5446c94 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -336,7 
+336,7 @@ INSERT INTO subpartitions.abc VALUES (250, 50); SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ merge_range_partitions ------------------------ - + subpartitions.abc_2 (1 row) SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; @@ -349,7 +349,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ merge_range_partitions ------------------------ - + subpartitions.abc_2_1 (1 row) SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; diff --git a/range.sql b/range.sql index dad82ff2..4b5c74a0 100644 --- a/range.sql +++ b/range.sql @@ -392,18 +392,6 @@ BEGIN END $$ LANGUAGE plpgsql; -/* - * The special case of merging two partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS $$ -BEGIN - PERFORM @extschema@.merge_range_partitions(array[partition1, partition2]::regclass[]); -END -$$ LANGUAGE plpgsql; - /* * Append new partition. */ @@ -883,8 +871,8 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ * The rest of partitions will be dropped. */ CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partitions REGCLASS[]) -RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' LANGUAGE C STRICT; /* diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index ed0eae95..1c48138e 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -320,8 +320,8 @@ DROP TABLE calamity.test_range_oid CASCADE; /* check function merge_range_partitions() */ -SELECT merge_range_partitions('{pg_class}'); /* not ok */ -SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); @@ -329,8 +329,8 @@ CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); -SELECT merge_range_partitions('{calamity.merge_test_a_1, - calamity.merge_test_b_1}'); /* not ok */ +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index feb028a5..6289e065 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -628,7 +628,8 @@ build_sequence_name(PG_FUNCTION_ARGS) Datum merge_range_partitions(PG_FUNCTION_ARGS) { - Oid parent = InvalidOid; + Oid parent = InvalidOid, + partition = InvalidOid; ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); Oid *parts; @@ -734,9 +735,10 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* First determine the bounds of a new constraint */ min_bound = bounds[0].min; max_bound = bounds[nparts - 1].max; + partition = parts[0]; /* Drop old constraint and create a new one */ - modify_range_constraint(parts[0], + modify_range_constraint(partition, prel->expr_cstr, prel->ev_type, &min_bound, @@ -801,7 +803,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Don't forget to close 'prel'! 
*/ close_pathman_relation_info(prel); - PG_RETURN_VOID(); + PG_RETURN_OID(partition); } @@ -851,12 +853,10 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) ranges = PrelGetRangesArray(prel); /* Looking for partition in child relations */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == partition) - break; + i = PrelHasPartition(prel, partition) - 1; /* Should have found it */ - Assert(i < PrelChildrenCount(prel)); + Assert(i >= 0 && i < PrelChildrenCount(prel)); /* Expand next partition if it exists */ if (i < PrelLastChild(prel)) From 464e840bf19182d6909f01157e583ad97b4e83f6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 13:38:45 +0300 Subject: [PATCH 271/528] reorder some code --- src/pl_range_funcs.c | 700 +++++++++++++++++++++---------------------- 1 file changed, 349 insertions(+), 351 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 6289e065..7fa00cf7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -384,242 +384,111 @@ generate_range_bounds_pl(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(array); } - -/* - * ------------------------ - * Various useful getters - * ------------------------ - */ - /* - * Returns range entry (min, max) (in form of array). - * - * arg #1 is the parent's Oid. - * arg #2 is the partition's Oid. + * Takes text representation of interval value and checks + * if it corresponds to partitioning expression. + * NOTE: throws an ERROR if it fails to convert text to Datum. */ Datum -get_part_range_by_oid(PG_FUNCTION_ARGS) +validate_interval_value(PG_FUNCTION_ARGS) { - Oid partition_relid, - parent_relid; - Oid arg_type; - RangeEntry *ranges; - PartRelationInfo *prel; - uint32 idx; - - if (!PG_ARGISNULL(0)) - { - partition_relid = PG_GETARG_OID(0); - } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_relid' should not be NULL"))); - - parent_relid = get_parent_of_partition(partition_relid); - if (!OidIsValid(parent_relid)) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)))); - - /* Emit an error if it is not partitioned by RANGE */ - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - - /* Check type of 'dummy' (for correct output) */ - arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - if (getBaseType(arg_type) != getBaseType(prel->ev_type)) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->ev_type))))); +#define ARG_PARTREL 0 +#define ARG_EXPRESSION 1 +#define ARG_PARTTYPE 2 +#define ARG_RANGE_INTERVAL 3 +#define ARG_EXPRESSION_P 4 - ranges = PrelGetRangesArray(prel); + Oid partrel; + PartType parttype; + char *expr_cstr; + Oid expr_type; - /* Look for the specified partition */ - if ((idx = PrelHasPartition(prel, partition_relid)) > 0) + if (PG_ARGISNULL(ARG_PARTREL)) { - ArrayType *arr; - Bound elems[2]; - - elems[0] = ranges[idx - 1].min; - elems[1] = ranges[idx - 1].max; - - arr = construct_bounds_array(elems, 2, - prel->ev_type, - prel->ev_len, - prel->ev_byval, - prel->ev_align); - - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); - - PG_RETURN_ARRAYTYPE_P(arr); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partrel' should not be NULL"))); } + else partrel = PG_GETARG_OID(ARG_PARTREL); - /* No partition found, report error */ - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%s\" has no partition \"%s\"", - get_rel_name_or_relid(parent_relid), - get_rel_name_or_relid(partition_relid)))); - - PG_RETURN_NULL(); /* keep compiler happy */ -} - -/* - * Returns N-th range entry (min, max) (in form of array). - * - * arg #1 is the parent's Oid. - * arg #2 is the index of the range - * (if it is negative then the last range will be returned). - */ -Datum -get_part_range_by_idx(PG_FUNCTION_ARGS) -{ - Oid parent_relid; - int partition_idx = 0; - Oid arg_type; - Bound elems[2]; - RangeEntry *ranges; - PartRelationInfo *prel; - ArrayType *arr; + /* Check that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) + elog(ERROR, "relation \"%u\" does not exist", partrel); - if (!PG_ARGISNULL(0)) + if (PG_ARGISNULL(ARG_EXPRESSION)) { - parent_relid = PG_GETARG_OID(0); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL"))); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parent_relid' should not be NULL"))); + else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); - if (!PG_ARGISNULL(1)) + if (PG_ARGISNULL(ARG_PARTTYPE)) { - partition_idx = PG_GETARG_INT32(1); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parttype' should not be NULL"))); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_idx' should not be NULL"))); - - /* Emit an error if it is not partitioned by RANGE */ - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); - /* Check type of 'dummy' (for correct output) */ - arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); - if (getBaseType(arg_type) != getBaseType(prel->ev_type)) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->ev_type))))); + /* + * Fetch partitioning expression's type using + * either user's expression or parsed expression. + * + * NOTE: we check number of function's arguments + * in case of late updates (e.g. 1.1 => 1.4). 
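+ *
+ * (Illustrative case: a backend still running the SQL wrappers of
+ * pathman 1.1 calls us with fewer arguments, so ARG_EXPRESSION_P is
+ * simply absent and the expression must be re-parsed from scratch.)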
+ */ + if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P) + { + Datum expr_datum; + /* We'll have to parse expression with our own hands */ + expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); - /* Now we have to deal with 'idx' */ - if (partition_idx < -1) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("negative indices other than -1" - " (last partition) are not allowed"))); - } - else if (partition_idx == -1) - { - partition_idx = PrelLastChild(prel); + /* Free both expressions */ + pfree(DatumGetPointer(expr_datum)); + pfree(expr_cstr); } - else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) + else { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("partition #%d does not exist (total amount is %u)", - partition_idx, PrelChildrenCount(prel)))); - } - - ranges = PrelGetRangesArray(prel); - - /* Build args for construct_infinitable_array() */ - elems[0] = ranges[partition_idx].min; - elems[1] = ranges[partition_idx].max; - - arr = construct_bounds_array(elems, 2, - prel->ev_type, - prel->ev_len, - prel->ev_byval, - prel->ev_align); - - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); - - PG_RETURN_ARRAYTYPE_P(arr); -} - - -/* - * ------------------------ - * Useful string builders - * ------------------------ - */ - -/* Build range condition for a CHECK CONSTRAINT. */ -Datum -build_range_condition(PG_FUNCTION_ARGS) -{ - Oid partition_relid; - char *expression; - Node *expr; + char *expr_p_cstr; - Bound min, - max; - Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); - Constraint *con; - char *result; + /* Good, let's use a cached parsed expression */ + expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); + expr_type = exprType(stringToNode(expr_p_cstr)); - if (!PG_ARGISNULL(0)) - { - partition_relid = PG_GETARG_OID(0); + /* Free both expressions */ + pfree(expr_p_cstr); + pfree(expr_cstr); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_relid' should not be NULL"))); - if (!PG_ARGISNULL(1)) + /* + * NULL interval is fine for both HASH and RANGE. + * But for RANGE we need to make some additional checks. + */ + if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); - } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'expression' should not be NULL")));; - - min = PG_ARGISNULL(2) ? - MakeBoundInf(MINUS_INFINITY) : - MakeBound(PG_GETARG_DATUM(2)); - - max = PG_ARGISNULL(3) ? 
- MakeBoundInf(PLUS_INFINITY) : - MakeBound(PG_GETARG_DATUM(3)); - - expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); - con = build_range_check_constraint(partition_relid, - expr, - &min, &max, - bounds_type); - - result = deparse_constraint(partition_relid, con->raw_expr); - - PG_RETURN_TEXT_P(cstring_to_text(result)); -} - -/* Build name for sequence for auto partition naming */ -Datum -build_sequence_name(PG_FUNCTION_ARGS) -{ - Oid parent_relid = PG_GETARG_OID(0); - Oid parent_nsp; - char *seq_name; - char *result; + Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), + interval_value; + Oid interval_type; - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) - ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); + if (parttype == PT_HASH) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should be NULL for HASH partitioned table"))); - parent_nsp = get_rel_namespace(parent_relid); - seq_name = build_sequence_name_relid_internal(parent_relid); + /* Try converting textual representation */ + interval_value = extract_binary_interval_from_text(interval_text, + expr_type, + &interval_type); - result = psprintf("%s.%s", - quote_identifier(get_namespace_name(parent_nsp)), - quote_identifier(seq_name)); + /* Check that interval isn't trivial */ + if (interval_is_trivial(expr_type, interval_value, interval_type)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should not be trivial"))); + } - PG_RETURN_TEXT_P(cstring_to_text(result)); + PG_RETURN_BOOL(true); } - /* * Merge multiple partitions. * All data will be copied to the first one. @@ -806,196 +675,325 @@ merge_range_partitions(PG_FUNCTION_ARGS) PG_RETURN_OID(partition); } - /* - * Drops partition and expands the next partition - * so that it could cover the dropped one. - * - * This function was written in order to support - * Oracle-like ALTER TABLE ... DROP PARTITION. + * Drops partition and expands the next partition + * so that it could cover the dropped one. + * + * This function was written in order to support + * Oracle-like ALTER TABLE ... DROP PARTITION. + * + * In Oracle partitions only have upper bound and when partition + * is dropped the next one automatically covers freed range. 
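+ *
+ * (Illustrative example: given partitions [0, 10) and [10, 20),
+ * dropping the first one stretches the second to cover [0, 20).)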
+ */
+Datum
+drop_range_partition_expand_next(PG_FUNCTION_ARGS)
+{
+	Oid			partition = PG_GETARG_OID(0),
+				parent;
+	PartRelationInfo *prel;
+	ObjectAddress object;
+	RangeEntry *ranges;
+	int			i;
+
+	/* Lock the partition we're going to drop */
+	LockRelationOid(partition, AccessExclusiveLock);
+
+	/* Check if partition exists */
+	if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition)))
+		elog(ERROR, "relation %u does not exist", partition);
+
+	/* Get parent's relid */
+	parent = get_parent_of_partition(partition);
+
+	/* Prevent changes in partitioning scheme */
+	LockRelationOid(parent, ShareUpdateExclusiveLock);
+
+	/* Check if parent exists */
+	if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent)))
+		elog(ERROR, "relation \"%s\" is not a partition",
+			 get_rel_name(partition));
+
+	/* Emit an error if it is not partitioned by RANGE */
+	prel = get_pathman_relation_info(parent);
+	shout_if_prel_is_invalid(parent, prel, PT_RANGE);
+
+	/* Fetch ranges array */
+	ranges = PrelGetRangesArray(prel);
+
+	/* Looking for partition in child relations */
+	i = PrelHasPartition(prel, partition) - 1;
+
+	/* Should have found it */
+	Assert(i >= 0 && i < PrelChildrenCount(prel));
+
+	/* Expand next partition if it exists */
+	if (i < PrelLastChild(prel))
+	{
+		RangeEntry *cur = &ranges[i],
+				   *next = &ranges[i + 1];
+		Oid			next_partition = next->child_oid;
+		LOCKMODE	lockmode = AccessExclusiveLock;
+
+		/* Lock next partition */
+		LockRelationOid(next_partition, lockmode);
+
+		/* Does next partition exist? */
+		if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition)))
+		{
+			/* Stretch next partition to cover range */
+			modify_range_constraint(next_partition,
+									prel->expr_cstr,
+									prel->ev_type,
+									&cur->min,
+									&next->max);
+		}
+		/* Bad luck, unlock missing partition */
+		else UnlockRelationOid(next_partition, lockmode);
+	}
+
+	/* Drop partition */
+	ObjectAddressSet(object, RelationRelationId, partition);
+	performDeletion(&object, DROP_CASCADE, 0);
+
+	/* Don't forget to close 'prel'! */
+	close_pathman_relation_info(prel);
+
+	PG_RETURN_VOID();
+}
+
+
+/*
+ * ------------------------
+ * Various useful getters
+ * ------------------------
+ */
+
+/*
+ * Returns range entry (min, max) (in form of array).
+ *
+ * arg #1 is the partition's Oid.
+ * arg #2 is the 'dummy' value; its type must match the partitioning
+ * expression's type (it is used to produce correctly-typed output).
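+ *
+ * (SQL-level usage, as exercised by the calamity regression test:
+ *  SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4);)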
+ */ +Datum +get_part_range_by_oid(PG_FUNCTION_ARGS) +{ + Oid partition_relid, + parent_relid; + Oid arg_type; + RangeEntry *ranges; + PartRelationInfo *prel; + uint32 idx; + + if (!PG_ARGISNULL(0)) + { + partition_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); + + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition_relid)))); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); + + ranges = PrelGetRangesArray(prel); + + /* Look for the specified partition */ + if ((idx = PrelHasPartition(prel, partition_relid)) > 0) + { + ArrayType *arr; + Bound elems[2]; + + elems[0] = ranges[idx - 1].min; + elems[1] = ranges[idx - 1].max; + + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_ARRAYTYPE_P(arr); + } + + /* No partition found, report error */ + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" has no partition \"%s\"", + get_rel_name_or_relid(parent_relid), + get_rel_name_or_relid(partition_relid)))); + + PG_RETURN_NULL(); /* keep compiler happy */ +} + +/* + * Returns N-th range entry (min, max) (in form of array). * - * In Oracle partitions only have upper bound and when partition - * is dropped the next one automatically covers freed range. + * arg #1 is the parent's Oid. + * arg #2 is the index of the range + * (if it is negative then the last range will be returned). 
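+ *
+ * (Illustrative call, assuming the 3-argument SQL wrapper:
+ *  SELECT get_part_range('parent', -1, NULL::INT4) returns the
+ *  bounds of the last partition.)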
*/ Datum -drop_range_partition_expand_next(PG_FUNCTION_ARGS) +get_part_range_by_idx(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0), - parent; - PartRelationInfo *prel; - ObjectAddress object; + Oid parent_relid; + int partition_idx = 0; + Oid arg_type; + Bound elems[2]; RangeEntry *ranges; - int i; - - /* Lock the partition we're going to drop */ - LockRelationOid(partition, AccessExclusiveLock); - - /* Check if partition exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition))) - elog(ERROR, "relation %u does not exist", partition); - - /* Get parent's relid */ - parent = get_parent_of_partition(partition); + PartRelationInfo *prel; + ArrayType *arr; - /* Prevent changes in partitioning scheme */ - LockRelationOid(parent, ShareUpdateExclusiveLock); + if (!PG_ARGISNULL(0)) + { + parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); - /* Check if parent exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent))) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name(partition)); + if (!PG_ARGISNULL(1)) + { + partition_idx = PG_GETARG_INT32(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_idx' should not be NULL"))); /* Emit an error if it is not partitioned by RANGE */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_RANGE); - - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - /* Looking for partition in child relations */ - i = PrelHasPartition(prel, partition) - 1; + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); - /* Should have found it */ - Assert(i >= 0 && i < PrelChildrenCount(prel)); - /* Expand next partition if it exists */ - if (i < PrelLastChild(prel)) + /* Now we have to deal with 'idx' */ + if (partition_idx < -1) { - RangeEntry *cur = &ranges[i], - *next = &ranges[i + 1]; - Oid next_partition = next->child_oid; - LOCKMODE lockmode = AccessExclusiveLock; + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("negative indices other than -1" + " (last partition) are not allowed"))); + } + else if (partition_idx == -1) + { + partition_idx = PrelLastChild(prel); + } + else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partition #%d does not exist (total amount is %u)", + partition_idx, PrelChildrenCount(prel)))); + } - /* Lock next partition */ - LockRelationOid(next_partition, lockmode); + ranges = PrelGetRangesArray(prel); - /* Does next partition exist? 
*/ - if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition))) - { - /* Stretch next partition to cover range */ - modify_range_constraint(next_partition, - prel->expr_cstr, - prel->ev_type, - &cur->min, - &next->max); - } - /* Bad luck, unlock missing partition */ - else UnlockRelationOid(next_partition, lockmode); - } + /* Build args for construct_infinitable_array() */ + elems[0] = ranges[partition_idx].min; + elems[1] = ranges[partition_idx].max; - /* Drop partition */ - ObjectAddressSet(object, RelationRelationId, partition); - performDeletion(&object, DROP_CASCADE, 0); + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); /* Don't forget to close 'prel'! */ close_pathman_relation_info(prel); - PG_RETURN_VOID(); + PG_RETURN_ARRAYTYPE_P(arr); } + /* - * Takes text representation of interval value and checks - * if it corresponds to partitioning expression. - * NOTE: throws an ERROR if it fails to convert text to Datum. + * ------------------------ + * Useful string builders + * ------------------------ */ + +/* Build range condition for a CHECK CONSTRAINT. */ Datum -validate_interval_value(PG_FUNCTION_ARGS) +build_range_condition(PG_FUNCTION_ARGS) { -#define ARG_PARTREL 0 -#define ARG_EXPRESSION 1 -#define ARG_PARTTYPE 2 -#define ARG_RANGE_INTERVAL 3 -#define ARG_EXPRESSION_P 4 - - Oid partrel; - PartType parttype; - char *expr_cstr; - Oid expr_type; - - if (PG_ARGISNULL(ARG_PARTREL)) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partrel' should not be NULL"))); - } - else partrel = PG_GETARG_OID(ARG_PARTREL); + Oid partition_relid; + char *expression; + Node *expr; - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) - elog(ERROR, "relation \"%u\" does not exist", partrel); + Bound min, + max; + Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + Constraint *con; + char *result; - if (PG_ARGISNULL(ARG_EXPRESSION)) + if (!PG_ARGISNULL(0)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'expression' should not be NULL"))); + partition_relid = PG_GETARG_OID(0); } - else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); - if (PG_ARGISNULL(ARG_PARTTYPE)) + if (!PG_ARGISNULL(1)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parttype' should not be NULL"))); + expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); } - else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL")));; - /* - * Fetch partitioning expression's type using - * either user's expression or parsed expression. - * - * NOTE: we check number of function's arguments - * in case of late updates (e.g. 1.1 => 1.4). - */ - if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P) - { - Datum expr_datum; + min = PG_ARGISNULL(2) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); - /* We'll have to parse expression with our own hands */ - expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); + max = PG_ARGISNULL(3) ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(3)); - /* Free both expressions */ - pfree(DatumGetPointer(expr_datum)); - pfree(expr_cstr); - } - else - { - char *expr_p_cstr; + expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); + con = build_range_check_constraint(partition_relid, + expr, + &min, &max, + bounds_type); - /* Good, let's use a cached parsed expression */ - expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); - expr_type = exprType(stringToNode(expr_p_cstr)); + result = deparse_constraint(partition_relid, con->raw_expr); - /* Free both expressions */ - pfree(expr_p_cstr); - pfree(expr_cstr); - } + PG_RETURN_TEXT_P(cstring_to_text(result)); +} - /* - * NULL interval is fine for both HASH and RANGE. - * But for RANGE we need to make some additional checks. - */ - if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) - { - Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), - interval_value; - Oid interval_type; +/* Build name for sequence for auto partition naming */ +Datum +build_sequence_name(PG_FUNCTION_ARGS) +{ + Oid parent_relid = PG_GETARG_OID(0); + Oid parent_nsp; + char *seq_name; + char *result; - if (parttype == PT_HASH) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval should be NULL for HASH partitioned table"))); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) + ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); - /* Try converting textual representation */ - interval_value = extract_binary_interval_from_text(interval_text, - expr_type, - &interval_type); + parent_nsp = get_rel_namespace(parent_relid); + seq_name = build_sequence_name_relid_internal(parent_relid); - /* Check that interval isn't trivial */ - if (interval_is_trivial(expr_type, interval_value, interval_type)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval should not be trivial"))); - } + result = psprintf("%s.%s", + quote_identifier(get_namespace_name(parent_nsp)), + quote_identifier(seq_name)); - PG_RETURN_BOOL(true); + PG_RETURN_TEXT_P(cstring_to_text(result)); } From 2559992db6c7c1f7f5ea738429348c837004570c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 16:22:48 +0300 Subject: [PATCH 272/528] rewrite split_range_partition() in C language --- expected/pathman_basic.out | 30 +++-- expected/pathman_domains.out | 2 +- expected/pathman_subpartitions.out | 2 +- range.sql | 112 ++--------------- sql/pathman_basic.sql | 1 + src/pl_range_funcs.c | 187 ++++++++++++++++++++++++++--- 6 files changed, 202 insertions(+), 132 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index b4b062d3..e9950470 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -910,7 +910,7 @@ NOTICE: drop cascades to 4 other objects SELECT pathman.split_range_partition('test.num_range_rel_1', 500); split_range_partition ----------------------- - {0,1000} + test.num_range_rel_5 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; @@ -923,10 +923,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 Index Cond: (id <= 700) (5 rows) +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + SELECT pathman.split_range_partition('test.range_rel_1', 
'2015-01-15'::DATE); - split_range_partition -------------------------- - {01-01-2015,02-01-2015} + split_range_partition +----------------------- + test.range_rel_5 (1 row) /* Merge two partitions into one */ @@ -1207,7 +1215,7 @@ SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); split_range_partition ----------------------- - {50,70} + test."test.zero_60" (1 row) DROP TABLE test.zero CASCADE; @@ -1528,9 +1536,9 @@ SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || (1 row) SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); - split_range_partition -------------------------- - {12-31-2014,01-02-2015} + split_range_partition +----------------------- + test."RangeRel_6" (1 row) DROP TABLE test."RangeRel" CASCADE; @@ -1598,9 +1606,9 @@ SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); (1 row) SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); - split_range_partition -------------------------- - {01-01-2010,03-01-2010} + split_range_partition +----------------------- + test.range_rel_13 (1 row) SELECT append_range_partition('test.range_rel'); diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index e6fc43fe..41c8bfbb 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -47,7 +47,7 @@ SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); SELECT split_range_partition('domains.dom_table_1', 50); split_range_partition ----------------------- - {1,201} + domains.dom_table_14 (1 row) INSERT INTO domains.dom_table VALUES(1101); diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index c5446c94..4dd5f5dd 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -306,7 +306,7 @@ ERROR: cannot split partition that has children SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ split_range_partition ----------------------- - {50,100} + subpartitions.abc_2_4 (1 row) SELECT subpartitions.partitions_tree('subpartitions.abc'); diff --git a/range.sql b/range.sql index 4b5c74a0..a014ed0f 100644 --- a/range.sql +++ b/range.sql @@ -294,104 +294,6 @@ END $$ LANGUAGE plpgsql; - -/* - * Split RANGE partition - */ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition_relid REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS $$ -DECLARE - parent_relid REGCLASS; - part_type INTEGER; - part_expr TEXT; - part_expr_type REGTYPE; - check_name TEXT; - check_cond TEXT; - new_partition TEXT; - -BEGIN - parent_relid = @extschema@.get_parent_of_partition(partition_relid); - - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition_relid); - - /* Acquire lock on parent's scheme */ - PERFORM @extschema@.prevent_part_modification(parent_relid); - - /* Acquire lock on partition's scheme */ - PERFORM @extschema@.prevent_part_modification(partition_relid); - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_data_modification(partition_relid); - - /* Check that partition is not partitioned */ - if @extschema@.get_number_of_partitions(partition_relid) > 0 THEN - RAISE EXCEPTION 'cannot split partition that has children'; - END IF; - - part_expr_type = 
@extschema@.get_partition_key_type(parent_relid); - part_expr := @extschema@.get_partition_key(parent_relid); - part_type := @extschema@.get_partition_type(parent_relid); - - /* Check if this is a RANGE partition */ - IF part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; - END IF; - - /* Get partition values range */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', - @extschema@.get_base_type(part_expr_type)::TEXT) - USING partition_relid - INTO p_range; - - IF p_range IS NULL THEN - RAISE EXCEPTION 'could not find specified partition'; - END IF; - - /* Check if value fit into the range */ - IF p_range[1] > split_value OR p_range[2] <= split_value - THEN - RAISE EXCEPTION 'specified value does not fit into the range [%, %)', - p_range[1], p_range[2]; - END IF; - - /* Create new partition */ - new_partition := @extschema@.create_single_range_partition(parent_relid, - split_value, - p_range[2], - partition_name, - tablespace); - - /* Copy data */ - check_cond := @extschema@.build_range_condition(new_partition::regclass, - part_expr, split_value, p_range[2]); - EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition_relid::TEXT, - check_cond, - new_partition); - - /* Alter original partition */ - check_cond := @extschema@.build_range_condition(partition_relid::regclass, - part_expr, p_range[1], split_value); - check_name := @extschema@.build_check_constraint_name(partition_relid); - - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition_relid::TEXT, - check_name); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition_relid::TEXT, - check_name, - check_cond); -END -$$ LANGUAGE plpgsql; - /* * Append new partition. */ @@ -867,8 +769,18 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ /* - * Merge multiple partitions. All data will be copied to the first one. - * The rest of partitions will be dropped. + * Split RANGE partition in two using a pivot. + */ +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +/* + * Merge RANGE partitions. 
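+ * (All rows are moved into the partition with the lowest bound, and
+ * that partition's name is returned.)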
*/ CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( variadic partitions REGCLASS[]) diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 11639852..3eb0afff 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -263,6 +263,7 @@ DROP TABLE test.hash_varchar CASCADE; /* Split first partition in half */ SELECT pathman.split_range_partition('test.num_range_rel_1', 500); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 7fa00cf7..7d17d407 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -46,15 +46,16 @@ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); PG_FUNCTION_INFO_V1( create_range_partitions_internal ); PG_FUNCTION_INFO_V1( check_range_available_pl ); PG_FUNCTION_INFO_V1( generate_range_bounds_pl ); +PG_FUNCTION_INFO_V1( validate_interval_value ); +PG_FUNCTION_INFO_V1( split_range_partition ); +PG_FUNCTION_INFO_V1( merge_range_partitions ); +PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( get_part_range_by_oid ); PG_FUNCTION_INFO_V1( get_part_range_by_idx ); PG_FUNCTION_INFO_V1( build_range_condition ); PG_FUNCTION_INFO_V1( build_sequence_name ); -PG_FUNCTION_INFO_V1( merge_range_partitions ); -PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); -PG_FUNCTION_INFO_V1( validate_interval_value ); static ArrayType *construct_bounds_array(Bound *elems, @@ -489,6 +490,162 @@ validate_interval_value(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } +Datum +split_range_partition(PG_FUNCTION_ARGS) +{ + Oid parent = InvalidOid, + partition1, + partition2; + RangeVar *part_name = NULL; + char *tablespace_name = NULL; + + Datum pivot_value; + Oid pivot_type; + + PartRelationInfo *prel; + Bound min_bound, + max_bound, + split_bound; + + Snapshot fresh_snapshot; + FmgrInfo finfo; + SPIPlanPtr plan; + char *query; + int i; + + if (!PG_ARGISNULL(0)) + { + partition1 = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition1' should not be NULL"))); + + if (!PG_ARGISNULL(1)) + { + pivot_value = PG_GETARG_DATUM(1); + pivot_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'split_value' should not be NULL"))); + + LockRelationOid(partition1, ExclusiveLock); + + /* Get parent of partition */ + parent = get_parent_of_partition(partition1); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition1)))); + + /* This partition should not have children */ + if (has_pathman_relation_info(partition1)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot split partition that has children"))); + + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + i = PrelHasPartition(prel, partition1) - 1; + Assert(i >= 0 && i < PrelChildrenCount(prel)); + + min_bound = PrelGetRangesArray(prel)[i].min; + max_bound = PrelGetRangesArray(prel)[i].max; + + split_bound = MakeBound(perform_type_cast(pivot_value, + 
getBaseType(pivot_type), + getBaseType(prel->ev_type), + NULL)); + + fmgr_info(prel->cmp_proc, &finfo); + + /* Validate pivot's value */ + if (cmp_bounds(&finfo, prel->ev_collid, &split_bound, &min_bound) <= 0 || + cmp_bounds(&finfo, prel->ev_collid, &split_bound, &max_bound) >= 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("specified value does not fit into the range (%s, %s)", + BoundToCString(&min_bound, prel->ev_type), + BoundToCString(&max_bound, prel->ev_type)))); + } + + if (!PG_ARGISNULL(2)) + { + part_name = makeRangeVar(get_namespace_name(get_rel_namespace(parent)), + TextDatumGetCString(PG_GETARG_DATUM(2)), + 0); + } + + if (!PG_ARGISNULL(3)) + { + tablespace_name = TextDatumGetCString(PG_GETARG_DATUM(3)); + } + + /* Create a new partition */ + partition2 = create_single_range_partition_internal(parent, + &split_bound, + &max_bound, + prel->ev_type, + part_name, + tablespace_name); + + /* Make constraint visible */ + CommandCounterIncrement(); + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. + */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); + + query = psprintf("WITH part_data AS ( " + "DELETE FROM %1$s WHERE (%3$s) >= $1 RETURNING " + "*) " + "INSERT INTO %2$s SELECT * FROM part_data", + get_qualified_rel_name(partition1), + get_qualified_rel_name(partition2), + prel->expr_cstr); + + plan = SPI_prepare(query, 1, &prel->ev_type); + + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + __FUNCTION__, SPI_result); + + SPI_execute_snapshot(plan, + &split_bound.value, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); + + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); + + SPI_finish(); + + /* Drop old constraint and create a new one */ + modify_range_constraint(partition1, + prel->expr_cstr, + prel->ev_type, + &min_bound, + &split_bound); + + /* Make constraint visible */ + CommandCounterIncrement(); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_OID(partition2); +} + /* * Merge multiple partitions. * All data will be copied to the first one. 
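 * The rest of partitions will be dropped.
 */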
@@ -565,7 +722,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) errdetail("all relations must share the same parent"))); } - /* Lock parent till transaction's end */ + /* Prevent changes in partitioning scheme */ LockRelationOid(parent, ShareUpdateExclusiveLock); /* Emit an error if it is not partitioned by RANGE */ @@ -632,9 +789,9 @@ merge_range_partitions(PG_FUNCTION_ARGS) ObjectAddress object; char *query = psprintf("WITH part_data AS ( " - "DELETE FROM %s RETURNING " + "DELETE FROM %1$s RETURNING " "*) " - "INSERT INTO %s SELECT * FROM part_data", + "INSERT INTO %2$s SELECT * FROM part_data", get_qualified_rel_name(parts[i]), get_qualified_rel_name(parts[0])); @@ -642,8 +799,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) if (!plan) elog(ERROR, "%s: SPI_prepare returned %d", - CppAsString(merge_range_partitions), - SPI_result); + __FUNCTION__, SPI_result); SPI_execute_snapshot(plan, NULL, NULL, fresh_snapshot, @@ -698,21 +854,16 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) /* Lock the partition we're going to drop */ LockRelationOid(partition, AccessExclusiveLock); - /* Check if partition exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition))) - elog(ERROR, "relation %u does not exist", partition); - /* Get parent's relid */ parent = get_parent_of_partition(partition); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition)))); /* Prevent changes in partitioning scheme */ LockRelationOid(parent, ShareUpdateExclusiveLock); - /* Check if parent exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent))) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name(partition)); - /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); @@ -722,8 +873,6 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) /* Looking for partition in child relations */ i = PrelHasPartition(prel, partition) - 1; - - /* Should have found it */ Assert(i >= 0 && i < PrelChildrenCount(prel)); /* Expand next partition if it exists */ From fa874e99a0f128341641281a5900e7ec921e6829 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 16:30:13 +0300 Subject: [PATCH 273/528] disabled cppcheck-based builds --- .travis.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3ca602c2..265ac48d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,14 +18,10 @@ script: env: - DOCKER_IMAGE=pathman/pg95_clang_check_code - - DOCKER_IMAGE=pathman/pg95_cppcheck - DOCKER_IMAGE=pathman/pg95_pathman_tests - DOCKER_IMAGE=pathman/pg96_clang_check_code - - DOCKER_IMAGE=pathman/pg96_cppcheck - DOCKER_IMAGE=pathman/pg96_pathman_tests - DOCKER_IMAGE=pathman/pg10_clang_check_code - - DOCKER_IMAGE=pathman/pg10_cppcheck - DOCKER_IMAGE=pathman/pg10_pathman_tests - DOCKER_IMAGE=pathman/pg10_ca_clang_check_code - - DOCKER_IMAGE=pathman/pg10_ca_cppcheck - DOCKER_IMAGE=pathman/pg10_ca_pathman_tests From f87a871d55510c16f4b6dfde897e2f8c397399af Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 16:43:33 +0300 Subject: [PATCH 274/528] fix python-based tests --- tests/python/partitioning_test.py | 554 ------------------------------ 1 file changed, 554 deletions(-) mode change 100755 => 100644 tests/python/partitioning_test.py diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py old mode 100755 new mode 100644 index 
27ad6613..12475b9e --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -15,48 +15,6 @@ import threading import time import unittest -<<<<<<< HEAD - -from distutils.version import LooseVersion -from testgres import get_new_node, get_bin_path, get_pg_version - -# set setup base logging config, it can be turned on by `use_logging` -# parameter on node setup - -import logging -import logging.config - -logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') -LOG_CONFIG = { - 'version': 1, - 'handlers': { - 'console': { - 'class': 'logging.StreamHandler', - 'formatter': 'base_format', - 'level': logging.DEBUG, - }, - 'file': { - 'class': 'logging.FileHandler', - 'filename': logfile, - 'formatter': 'base_format', - 'level': logging.DEBUG, - }, - }, - 'formatters': { - 'base_format': { - 'format': '%(node)-5s: %(message)s', - }, - }, - 'root': { - 'handlers': ('file', ), - 'level': 'DEBUG', - }, -} - -logging.config.dictConfig(LOG_CONFIG) -version = LooseVersion(get_pg_version()) - -======= import functools from distutils.version import LooseVersion @@ -99,7 +57,6 @@ logging.config.dictConfig(LOG_CONFIG) version = LooseVersion(get_pg_version()) ->>>>>>> master # Helper function for json equality def ordered(obj, skip_keys=None): @@ -112,18 +69,6 @@ def ordered(obj, skip_keys=None): return obj -<<<<<<< HEAD -def if_fdw_enabled(func): - """ To run tests with FDW support, set environment variable TEST_FDW=1 """ - - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - - return wrapper -======= # Check if postgres_fdw is available @functools.lru_cache(maxsize=1) def is_postgres_fdw_ready(): @@ -136,7 +81,6 @@ def is_postgres_fdw_ready(): return True return False ->>>>>>> master class Tests(unittest.TestCase): @@ -145,25 +89,6 @@ def set_trace(self, con, command="pg_debug"): p = subprocess.Popen([command], stdin=subprocess.PIPE) p.communicate(str(pid).encode()) -<<<<<<< HEAD - def start_new_pathman_cluster(self, - name='test', - allow_streaming=False, - test_data=False): - node = get_new_node(name) - node.init(allow_streaming=allow_streaming) - node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") - node.start() - node.psql('postgres', 'create extension pg_pathman') - if test_data: - cmds = ( - "create table abc(id serial, t text)", - "insert into abc select generate_series(1, 300000)", - "select create_hash_partitions('abc', 'id', 3, partition_data := false)", - ) - for cmd in cmds: - node.safe_psql('postgres', cmd) -======= def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): node = get_new_node() node.init(allow_streaming=allow_streaming) @@ -179,7 +104,6 @@ def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): """) node.safe_psql('vacuum analyze') ->>>>>>> master return node @@ -187,29 +111,17 @@ def test_concurrent(self): """ Test concurrent partitioning """ with self.start_new_pathman_cluster(test_data=True) as node: -<<<<<<< HEAD - node.psql('postgres', "select partition_table_concurrently('abc')") - - while True: - # update some rows to check for deadlocks - node.safe_psql('postgres', """ -======= node.psql("select partition_table_concurrently('abc')") while True: # update some rows to check for deadlocks node.safe_psql(""" ->>>>>>> master update abc set t = 'test' where id in (select (random() * 300000)::int from generate_series(1, 3000)) """) 
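+                # poll until every concurrent partitioning task has finished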
-<<<<<<< HEAD - count = node.execute('postgres', """ -======= count = node.execute(""" ->>>>>>> master select count(*) from pathman_concurrent_part_tasks """) @@ -218,15 +130,9 @@ def test_concurrent(self): break time.sleep(1) -<<<<<<< HEAD - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') -======= data = node.execute('select count(*) from only abc') self.assertEqual(data[0][0], 0) data = node.execute('select count(*) from abc') ->>>>>>> master self.assertEqual(data[0][0], 300000) node.stop() @@ -234,47 +140,22 @@ def test_replication(self): """ Test how pg_pathman works with replication """ with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: -<<<<<<< HEAD - with node.replicate('node2') as replica: -======= with node.replicate() as replica: ->>>>>>> master replica.start() replica.catchup() # check that results are equal self.assertEqual( -<<<<<<< HEAD - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - - # enable parent and see if it is enabled in replica - node.psql('postgres', "select enable_parent('abc')") -======= node.psql('explain (costs off) select * from abc'), replica.psql('explain (costs off) select * from abc')) # enable parent and see if it is enabled in replica node.psql("select enable_parent('abc')") ->>>>>>> master # wait until replica catches up replica.catchup() self.assertEqual( -<<<<<<< HEAD - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], 300000) - - # check that UPDATE in pathman_config_params invalidates cache - node.psql('postgres', - 'update pathman_config_params set enable_parent = false') -======= node.psql('explain (costs off) select * from abc'), replica.psql('explain (costs off) select * from abc')) self.assertEqual( @@ -285,21 +166,11 @@ def test_replication(self): # check that UPDATE in pathman_config_params invalidates cache node.psql('update pathman_config_params set enable_parent = false') ->>>>>>> master # wait until replica catches up replica.catchup() self.assertEqual( -<<<<<<< HEAD - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], 0) -======= node.psql('explain (costs off) select * from abc'), replica.psql('explain (costs off) select * from abc')) self.assertEqual( @@ -307,7 +178,6 @@ def test_replication(self): replica.psql('select * from abc')) self.assertEqual( node.execute('select count(*) from abc')[0][0], 0) ->>>>>>> master def test_locks(self): """ @@ -337,39 +207,22 @@ def add_partition(node, flag, query): We expect that this query will wait until another session commits or rolls back """ -<<<<<<< HEAD - node.safe_psql('postgres', query) -======= node.safe_psql(query) ->>>>>>> master with lock: flag.set(True) # Initialize master server -<<<<<<< HEAD - with get_new_node('master') as node: - node.init() - node.append_conf("postgresql.conf", 
"shared_preload_libraries='pg_pathman'\n") - node.start() - sql = """ -======= with get_new_node() as node: node.init() node.append_conf("shared_preload_libraries='pg_pathman'") node.start() node.safe_psql(""" ->>>>>>> master create extension pg_pathman; create table abc(id serial, t text); insert into abc select generate_series(1, 100000); select create_range_partitions('abc', 'id', 1, 50000); -<<<<<<< HEAD - """ - node.safe_psql('postgres', sql) -======= """) ->>>>>>> master # Start transaction that will create partition with node.connect() as con: @@ -381,13 +234,9 @@ def add_partition(node, flag, query): query = ( "select prepend_range_partition('abc')", "select append_range_partition('abc')", -<<<<<<< HEAD - "select add_range_partition('abc', 500000, 550000)", ) -======= "select add_range_partition('abc', 500000, 550000)", ) ->>>>>>> master threads = [] for i in range(3): thread = threading.Thread( @@ -396,11 +245,7 @@ def add_partition(node, flag, query): thread.start() time.sleep(3) -<<<<<<< HEAD - # This threads should wait until current transaction finished -======= # These threads should wait until current transaction finished ->>>>>>> master with lock: for i in range(3): self.assertEqual(flags[i].get(), False) @@ -422,10 +267,6 @@ def add_partition(node, flag, query): # Check that all partitions are created self.assertEqual( node.safe_psql( -<<<<<<< HEAD - 'postgres', -======= ->>>>>>> master "select count(*) from pg_inherits where inhparent='abc'::regclass"), b'6\n') @@ -433,68 +274,21 @@ def test_tablespace(self): """ Check tablespace support """ def check_tablespace(node, tablename, tablespace): -<<<<<<< HEAD - res = node.execute('postgres', - "select get_tablespace('{}')".format(tablename)) -======= res = node.execute("select get_tablespace('{}')".format(tablename)) ->>>>>>> master if len(res) == 0: return False return res[0][0] == tablespace -<<<<<<< HEAD - with get_new_node('master') as node: - node.init() - node.append_conf('postgresql.conf', - "shared_preload_libraries='pg_pathman'\n") - node.start() - node.psql('postgres', 'create extension pg_pathman') -======= with get_new_node() as node: node.init() node.append_conf("shared_preload_libraries='pg_pathman'") node.start() node.psql('create extension pg_pathman') ->>>>>>> master # create tablespace path = os.path.join(node.data_dir, 'test_space_location') os.mkdir(path) -<<<<<<< HEAD - node.psql('postgres', - "create tablespace test_space location '{}'".format(path)) - - # create table in this tablespace - node.psql('postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql('postgres', - "select create_range_partitions('abc', 'a', 1, 10, 3)") - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql('postgres', - "select append_range_partition('abc', 'abc_appended')") - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - "select prepend_range_partition('abc', 'abc_prepended')") - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - "select add_range_partition('abc', 41, 51, 'abc_added')") - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql('postgres', - "select split_range_partition('abc_added', 45, 'abc_splitted')") -======= node.psql("create tablespace test_space location '{}'".format(path)) # create table in this tablespace @@ -519,36 +313,19 @@ def check_tablespace(node, tablename, tablespace): # check tablespace for split node.psql("select split_range_partition('abc_added', 45, 'abc_splitted')") ->>>>>>> master self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) # now let's specify tablespace explicitly node.psql( -<<<<<<< HEAD - 'postgres', "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" ) node.psql( - 'postgres', "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" ) node.psql( - 'postgres', "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" ) node.psql( - 'postgres', -======= - "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" - ) - node.psql( - "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" - ) - node.psql( - "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" - ) - node.psql( ->>>>>>> master "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')" ) @@ -558,25 +335,11 @@ def check_tablespace(node, tablename, tablespace): self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) -<<<<<<< HEAD - @if_fdw_enabled -======= @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') ->>>>>>> master def test_foreign_table(self): """ Test foreign tables """ # Start master server -<<<<<<< HEAD - with get_new_node('test') as master, get_new_node('fserv') as fserv: - master.init() - master.append_conf('postgresql.conf', """ - shared_preload_libraries='pg_pathman, postgres_fdw'\n - """) - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') -======= with get_new_node() as master, get_new_node() as fserv: master.init() master.append_conf(""" @@ -585,7 +348,6 @@ def test_foreign_table(self): master.start() master.psql('create extension pg_pathman') master.psql('create extension postgres_fdw') ->>>>>>> master # RANGE partitioning test with FDW: # - create range partitioned table in master @@ -594,26 +356,12 @@ def test_foreign_table(self): # - attach foreign table to partitioned one # - try inserting data into foreign partition via parent # - drop partitions -<<<<<<< HEAD - master.psql('postgres', """ -======= master.psql(""" ->>>>>>> master create table abc(id serial, name text); select create_range_partitions('abc', 'id', 0, 10, 2) 
""") # Current user name (needed for user mapping) -<<<<<<< HEAD - username = master.execute('postgres', 'select current_user')[0][0] - - fserv.init().start() - fserv.safe_psql('postgres', "create table ftable(id serial, name text)") - fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") - - # Create foreign table and attach it to partitioned table - master.safe_psql('postgres', """ -======= username = master.execute('select current_user')[0][0] fserv.init().start() @@ -622,52 +370,26 @@ def test_foreign_table(self): # Create foreign table and attach it to partitioned table master.safe_psql(""" ->>>>>>> master create server fserv foreign data wrapper postgres_fdw options (dbname 'postgres', host '127.0.0.1', port '{}') """.format(fserv.port)) -<<<<<<< HEAD - master.safe_psql('postgres', """ -======= master.safe_psql(""" ->>>>>>> master create user mapping for {0} server fserv options (user '{0}') """.format(username)) -<<<<<<< HEAD - master.safe_psql('postgres', """ -======= master.safe_psql(""" ->>>>>>> master import foreign schema public limit to (ftable) from server fserv into public """) master.safe_psql( -<<<<<<< HEAD - 'postgres', -======= ->>>>>>> master "select attach_range_partition('abc', 'ftable', 20, 30)") # Check that table attached to partitioned table self.assertEqual( -<<<<<<< HEAD - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n') - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', "insert into abc values (26, 'part')") - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - b'25|foreign\n26|part\n') - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', "select drop_partitions('abc')") -======= master.safe_psql('select * from ftable'), b'25|foreign\n') @@ -679,7 +401,6 @@ def test_foreign_table(self): # Testing drop partitions (including foreign partitions) master.safe_psql("select drop_partitions('abc')") ->>>>>>> master # HASH partitioning with FDW: # - create hash partitioned table in master @@ -687,31 +408,6 @@ def test_foreign_table(self): # - replace local partition with foreign one # - insert data # - drop partitions -<<<<<<< HEAD - master.psql('postgres', """ - create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2) - """) - fserv.safe_psql('postgres', - 'create table f_hash_test(id serial, name text)') - - master.safe_psql('postgres', """ - import foreign schema public limit to (f_hash_test) - from server fserv into public - """) - master.safe_psql('postgres', """ - select replace_hash_partition('hash_test_1', 'f_hash_test') - """) - master.safe_psql('postgres', - 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') - master.safe_psql('postgres', "select drop_partitions('hash_test')") - - @if_fdw_enabled -======= master.psql(""" create table hash_test(id serial, name text); select create_hash_partitions('hash_test', 'id', 2) @@ -733,23 +429,14 @@ def test_foreign_table(self): master.safe_psql("select drop_partitions('hash_test')") @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') ->>>>>>> master def test_parallel_nodes(self): """ Test parallel queries under partitions """ # Init and start postgres instance with preload pg_pathman module -<<<<<<< HEAD - with get_new_node('test') as node: - node.init() - 
node.append_conf( - 'postgresql.conf', - "shared_preload_libraries='pg_pathman, postgres_fdw'\n") -======= with get_new_node() as node: node.init() node.append_conf( "shared_preload_libraries='pg_pathman, postgres_fdw'") ->>>>>>> master node.start() # Check version of postgres server @@ -758,13 +445,8 @@ def test_parallel_nodes(self): return # Prepare test database -<<<<<<< HEAD - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', """ -======= node.psql('create extension pg_pathman') node.psql(""" ->>>>>>> master create table range_partitioned as select generate_series(1, 1e4::integer) i; @@ -779,15 +461,9 @@ def test_parallel_nodes(self): """) # create statistics for both partitioned tables -<<<<<<< HEAD - node.psql('postgres', 'vacuum analyze') - - node.psql('postgres', """ -======= node.psql('vacuum analyze') node.psql(""" ->>>>>>> master create or replace function query_plan(query text) returns jsonb as $$ declare @@ -945,15 +621,9 @@ def test_parallel_nodes(self): self.assertEqual(ordered(plan), ordered(expected)) # Remove all objects for testing -<<<<<<< HEAD - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') -======= node.psql('drop table range_partitioned cascade') node.psql('drop table hash_partitioned cascade') node.psql('drop extension pg_pathman cascade') ->>>>>>> master def test_conc_part_drop_runtime_append(self): """ Test concurrent partition drop + SELECT (RuntimeAppend) """ @@ -1196,225 +866,6 @@ def con2_thread(): self.assertEqual(str(rows[0][1]), 'ins_test_1') def test_pg_dump(self): -<<<<<<< HEAD - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. 
- """ - - # Init and start postgres instance with preload pg_pathman module - with get_new_node('test') as node: - node.init() - node.append_conf('postgresql.conf', """ - shared_preload_libraries='pg_pathman' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute( - 'insert into range_partitioned select i from generate_series(1, 500) i' - ) - con.execute('create table hash_partitioned (i integer not null)') - con.execute( - 'insert into hash_partitioned select i from generate_series(1, 500) i' - ) - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute( - "select create_range_partitions('range_partitioned', 'i', 1, 200, partition_data := false)" - ) - con.execute( - "select create_hash_partitions('hash_partitioned', 'i', 5, false)" - ) - - # fillin child tables with remain data - con.execute( - 'insert into range_partitioned select i from generate_series(501, 1000) i' - ) - con.execute( - 'insert into hash_partitioned select i from generate_series(501, 1000) i' - ) - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute( - "select set_init_callback('range_partitioned', 'init_partition_stub_callback(jsonb)')" - ) - con.execute( - "select set_init_callback('hash_partitioned', 'init_partition_stub_callback(jsonb)')" - ) - - # turn off enable_parent option - con.execute( - "select set_enable_parent('range_partitioned', false)") - con.execute("select set_enable_parent('hash_partitioned', false)") - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - - def cmp_full(con1, con2): - """ - Compare selection partitions in plan - and contents in partitioned tables - """ - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', 'only range_partitioned', - 'hash_partitioned', 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute( - plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute( - plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [ - x[0] for x in con1.execute(content_query % table_ref) - ] - content_copy = [ - x[0] for x in con2.execute(content_query % table_ref) - ] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', - 'alter system set pg_pathman.override_copy to off') - node.psql('copy', - 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, None, [ - get_bin_path("pg_dump"), "-p 
{}".format(node.port), - "initial" - ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, turnon_pathman, [ - get_bin_path("pg_dump"), "-p {}".format(node.port), - "--inserts", "initial" - ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, None, [ - get_bin_path("pg_dump"), "-p {}".format(node.port), - "--format=custom", "initial" - ], [ - get_bin_path("pg_restore"), "-p {}".format(node.port), - "--dbname=copy" - ], cmp_full), # dump in archive format - ] - - with open(os.devnull, 'w') as fnull: - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), - ' '.join(pg_restore_params))) - - if (preproc is not None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen( - pg_restore_params, - stdin=subprocess.PIPE, - stdout=fnull, - stderr=fnull) - p2.communicate(input=stdoutdata) - - if (postproc is not None): - postproc(node) - - # validate data - with node.connect('initial') as con1, \ - node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual( - cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" - % dump_restore_cmd) - self.assertNotEqual( - cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" - % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') -======= with self.start_new_pathman_cluster() as node: node.safe_psql('create database copy') @@ -1453,8 +904,6 @@ def turnon_pathman(node): p2 = node.execute('copy', 'select * from pathman_partition_list') self.assertEqual(sorted(p1), sorted(p2)) ->>>>>>> master - def test_concurrent_detach(self): """ Test concurrent detach partition with contiguous @@ -1537,7 +986,6 @@ def test_concurrent_detach(self): Race condition between detach and concurrent inserts with append partition is expired """) -<<<<<<< HEAD def test_update_node_plan1(self): ''' @@ -1614,8 
+1062,6 @@ def test_update_node_plan1(self): node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') -======= ->>>>>>> master if __name__ == "__main__": From f6610e696f97f05e68a1ed12dbd05d76d6a96629 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 23:16:34 +0300 Subject: [PATCH 275/528] treat SubLinks differently --- src/planner_tree_modification.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index c4b4073d..ff5b51fb 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -625,6 +625,14 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) return false; } + if (IsA(node, SubLink)) + { + SubLink *sl = (SubLink *) node; + + /* Examine its expression */ + node = sl->testexpr; + } + return expression_tree_walker(node, adjust_appendrel_varnos, context); From 26ed609e4aae6695c9f1a231db5cfb056a3d3412 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 00:37:21 +0300 Subject: [PATCH 276/528] more tests for rebuilt updates --- expected/pathman_rebuild_updates.out | 30 ++++++++++++++++++++++++++++ sql/pathman_rebuild_updates.sql | 16 +++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index 297089af..5c54f2cd 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -24,6 +24,7 @@ SELECT append_range_partition('test_updates.test'); (1 row) INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; /* tuple descs are the same */ EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; QUERY PLAN @@ -54,6 +55,35 @@ UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLA 101 | 0 | test_updates.test_11 (1 row) +CREATE TABLE test_updates.test_dummy (val INT4); +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Update on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Update on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +DROP TABLE test_updates.test_dummy; DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 13 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index ec4924ea..fc827dd3 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -22,6 +22,9 @@ SELECT append_range_partition('test_updates.test'); INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; + + /* tuple descs are the same */ EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; @@ -31,6 +34,19 @@ UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 
WHERE val = 101; UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; +CREATE TABLE test_updates.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +DROP TABLE test_updates.test_dummy; + DROP SCHEMA test_updates CASCADE; From 7747219f48ad044402199bdfdb6058955ea1c460 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 14:40:20 +0300 Subject: [PATCH 277/528] don't use varoattno in adjust_appendrel_varnos() --- src/planner_tree_modification.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ff5b51fb..f404300e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -605,7 +605,7 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) Var *var = (Var *) node; /* Don't transform system columns & other relations' Vars */ - if (var->varoattno > 0 && var->varno == context->child_varno) + if (var->varattno > 0 && var->varno == context->child_varno) { Var *child_var; From 1c436e82457212097047fc3d89e83ed9ae56f64a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 14:57:24 +0300 Subject: [PATCH 278/528] decouple Append & MergeAppend in plan_tree_walker() --- src/planner_tree_modification.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 3225e59e..4a9b8c40 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -132,9 +132,9 @@ reset_query_id_generator(void) /* - * Basic plan tree walker + * Basic plan tree walker. * - * 'visitor' is applied right before return + * 'visitor' is applied right before return. 
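+ * (i.e. each node is visited only after all of its subplans).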
*/ void plan_tree_walker(Plan *plan, @@ -165,15 +165,16 @@ plan_tree_walker(Plan *plan, plan_tree_walker((Plan *) lfirst(l), visitor, context); break; - /* Since they look alike */ - case T_MergeAppend: case T_Append: - Assert(offsetof(Append, appendplans) == - offsetof(MergeAppend, mergeplans)); foreach(l, ((Append *) plan)->appendplans) plan_tree_walker((Plan *) lfirst(l), visitor, context); break; + case T_MergeAppend: + foreach(l, ((MergeAppend *) plan)->mergeplans) + plan_tree_walker((Plan *) lfirst(l), visitor, context); + break; + case T_BitmapAnd: foreach(l, ((BitmapAnd *) plan)->bitmapplans) plan_tree_walker((Plan *) lfirst(l), visitor, context); From 2ce250c8d93bb2b3ae2ec7c8e77589311e7cb7ee Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 15:15:50 +0300 Subject: [PATCH 279/528] copy expression trees for safety --- src/planner_tree_modification.c | 93 ++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 31 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index c18bf137..04474fda 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -109,7 +109,8 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); -static bool adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); +static Node *adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); +static bool inh_translation_list_is_trivial(List *translated_vars); /* @@ -389,7 +390,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) rte = rt_fetch(result_rti, parse->rtable); - /* Exit if it's DELETE FROM ONLY table */ + /* Exit if it's ONLY table */ if (!rte->inh) return; prel = get_pathman_relation_info(rte->relid); @@ -465,33 +466,37 @@ handle_modification_query(Query *parse, transform_query_cxt *context) return; /* nothing to do here */ } + /* Update RTE's relid and relkind (for FDW) */ + rte->relid = child; + rte->relkind = child_relkind; + + /* HACK: unset the 'inh' flag (no children) */ + rte->inh = false; + /* Both tables are already locked */ child_rel = heap_open(child, NoLock); parent_rel = heap_open(parent, NoLock); make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); - /* Translate varnos for this child */ - aav_cxt.child_varno = result_rti; - aav_cxt.parent_relid = parent; - aav_cxt.translated_vars = translated_vars; - adjust_appendrel_varnos((Node *) parse, &aav_cxt); - - /* Translate column privileges for this child */ - rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); - rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); - rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); + /* Perform some additional adjustments */ + if (!inh_translation_list_is_trivial(translated_vars)) + { + /* Translate varnos for this child */ + aav_cxt.child_varno = result_rti; + aav_cxt.parent_relid = parent; + aav_cxt.translated_vars = translated_vars; + adjust_appendrel_varnos((Node *) parse, &aav_cxt); + + /* Translate column privileges for this child */ + rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); + rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); + rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); + } /* Close relations (should remain 
locked, though) */ heap_close(child_rel, NoLock); heap_close(parent_rel, NoLock); - - /* Update RTE's relid and relkind (for FDW) */ - rte->relid = child; - rte->relkind = child_relkind; - - /* HACK: unset the 'inh' flag (no children) */ - rte->inh = false; } } @@ -562,11 +567,11 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) } /* Remap parent's attributes to child ones s*/ -static bool +static Node * adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { if (node == NULL) - return false; + return NULL; if (IsA(node, Query)) { @@ -577,7 +582,7 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) foreach (lc, query->targetList) { TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *child_var; + Var *child_var; if (te->resjunk) continue; @@ -595,10 +600,12 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) te->resno = child_var->varattno; } - return query_tree_walker((Query *) node, - adjust_appendrel_varnos, - context, - QTW_IGNORE_RC_SUBQUERIES); + /* NOTE: we shouldn't copy top-level Query */ + return (Node *) query_tree_mutator((Query *) node, + adjust_appendrel_varnos, + context, + (QTW_IGNORE_RC_SUBQUERIES | + QTW_DONT_COPY_QUERY)); } if (IsA(node, Var)) @@ -610,6 +617,8 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { Var *child_var; + var = copyObject(var); + if (var->varattno > list_length(context->translated_vars)) elog(ERROR, "attribute %d of relation \"%s\" does not exist", var->varattno, get_rel_name(context->parent_relid)); @@ -623,7 +632,7 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) var->varattno = child_var->varattno; } - return false; + return (Node *) var; } if (IsA(node, SubLink)) @@ -631,14 +640,36 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) SubLink *sl = (SubLink *) node; /* Examine its expression */ - node = sl->testexpr; + sl->testexpr = expression_tree_mutator(sl->testexpr, + adjust_appendrel_varnos, + context); + return (Node *) sl; } - return expression_tree_walker(node, - adjust_appendrel_varnos, - context); + return expression_tree_mutator(node, + adjust_appendrel_varnos, + context); } +/* Check whether Var translation list is trivial (no shuffle) */ +static bool +inh_translation_list_is_trivial(List *translated_vars) +{ + ListCell *lc; + AttrNumber i = 1; + + foreach (lc, translated_vars) + { + Var *var = (Var *) lfirst(lc); + + if (var && var->varattno != i) + return false; + + i++; + } + + return true; +} /* * ------------------------------- From 79a1b89c375f59d8f4c375c0256582884384904b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 16:02:27 +0300 Subject: [PATCH 280/528] handle wholerow references as well --- expected/pathman_rebuild_updates.out | 19 ++++++++++ sql/pathman_rebuild_updates.sql | 9 +++++ src/planner_tree_modification.c | 57 +++++++++++++++++++--------- 3 files changed, 68 insertions(+), 17 deletions(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index 5c54f2cd..f7d59718 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -83,6 +83,25 @@ RETURNING t1.*, t1.tableoid::REGCLASS; Filter: (val = 101) (6 rows) +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Update on test_11 + -> Seq Scan 
on test_11 + Filter: (((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + test +--------- + (101,0) +(1 row) + DROP TABLE test_updates.test_dummy; DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 13 other objects diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index fc827dd3..41d168df 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -45,6 +45,15 @@ FROM test_updates.test_dummy t2 WHERE t1.val = 101 AND t1.val = t2.val RETURNING t1.*, t1.tableoid::REGCLASS; +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + DROP TABLE test_updates.test_dummy; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 04474fda..63cf9963 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -96,7 +96,9 @@ typedef struct typedef struct { Index child_varno; - Oid parent_relid; + Oid parent_relid, + parent_reltype, + child_reltype; List *translated_vars; } adjust_appendrel_varnos_cxt; @@ -483,9 +485,11 @@ handle_modification_query(Query *parse, transform_query_cxt *context) if (!inh_translation_list_is_trivial(translated_vars)) { /* Translate varnos for this child */ - aav_cxt.child_varno = result_rti; - aav_cxt.parent_relid = parent; - aav_cxt.translated_vars = translated_vars; + aav_cxt.child_varno = result_rti; + aav_cxt.parent_relid = parent; + aav_cxt.parent_reltype = RelationGetDescr(parent_rel)->tdtypeid; + aav_cxt.child_reltype = RelationGetDescr(child_rel)->tdtypeid; + aav_cxt.translated_vars = translated_vars; adjust_appendrel_varnos((Node *) parse, &aav_cxt); /* Translate column privileges for this child */ @@ -612,24 +616,43 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { Var *var = (Var *) node; - /* Don't transform system columns & other relations' Vars */ - if (var->varattno > 0 && var->varno == context->child_varno) + /* See adjust_appendrel_attrs_mutator() */ + if (var->varno == context->child_varno) { - Var *child_var; + if (var->varattno > 0) + { + Var *child_var; - var = copyObject(var); + var = copyObject(var); - if (var->varattno > list_length(context->translated_vars)) - elog(ERROR, "attribute %d of relation \"%s\" does not exist", - var->varattno, get_rel_name(context->parent_relid)); + if (var->varattno > list_length(context->translated_vars)) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); - child_var = list_nth(context->translated_vars, var->varattno - 1); - if (!child_var) - elog(ERROR, "attribute %d of relation \"%s\" does not exist", - var->varattno, get_rel_name(context->parent_relid)); + child_var = list_nth(context->translated_vars, var->varattno - 1); + if (!child_var) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); - /* Transform attribute number */ - var->varattno = child_var->varattno; + /* Transform attribute number */ + var->varattno = child_var->varattno; + } + else if (var->varattno == 0) + { + ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr); + + Assert(var->vartype = context->parent_reltype); + + r->arg = (Expr *) var; + 
r->resulttype = context->parent_reltype; + r->convertformat = COERCE_IMPLICIT_CAST; + r->location = -1; + + /* Make sure the Var node has the right type ID, too */ + var->vartype = context->child_reltype; + + return (Node *) r; + } } return (Node *) var; From 1608d8d3ff1bc77cb4412b3b6acb16c83c170879 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 5 Jul 2018 12:34:23 +0300 Subject: [PATCH 281/528] small refactoring in run_tests.sh --- run_tests.sh | 58 +++++++++++++++++++++------------------------------- 1 file changed, 23 insertions(+), 35 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 49c481b9..2dbcfd0c 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -6,33 +6,23 @@ # * cmocka-based tests # Copyright (c) 2017, Postgres Professional -set -eux +set -ux echo CHECK_CODE=$CHECK_CODE +echo PG_VERSION=$(pg_config --version) status=0 +# change relevant core dump settings +CORE_DIR=/tmp/cores +ulimit -c unlimited -S +mkdir "$CORE_DIR" +echo "$CORE_DIR/%e-%s-%p.core" | sudo tee /proc/sys/kernel/core_pattern + # perform code analysis if necessary if [ "$CHECK_CODE" = "clang" ]; then scan-build --status-bugs make USE_PGXS=1 || status=$? exit $status - -elif [ "$CHECK_CODE" = "cppcheck" ]; then - cppcheck \ - --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=literalWithCharPtrCompare \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/include/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status fi # we need testgres for pathman tests @@ -42,28 +32,20 @@ source env/bin/activate pip install testgres pip freeze | grep testgres -# don't forget to "make clean" -make USE_PGXS=1 clean - # initialize database initdb # build pg_pathman (using PG_CPPFLAGS and SHLIB_LINK for gcov) +set -e +make USE_PGXS=1 clean make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" make USE_PGXS=1 install - -# check build -status=$? -if [ $status -ne 0 ]; then exit $status; fi +set +e # add pg_pathman to shared_preload_libraries and restart cluster 'test' echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf echo "port = 55435" >> $PGDATA/postgresql.conf -pg_ctl start -l /tmp/postgres.log -w - -# check startup -status=$? -if [ $status -ne 0 ]; then cat /tmp/postgres.log; fi +pg_ctl start -l /tmp/postgres.log -w || cat /tmp/postgres.log # run regression tests export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) @@ -72,18 +54,22 @@ PGPORT=55435 make USE_PGXS=1 installcheck || status=$? # show diff if it exists if test -f regression.diffs; then cat regression.diffs; fi -set +u +# list cores and exit if we failed +ls "$CORE_DIR" +if [ $status -ne 0 ]; then exit $status; fi # run python tests +set +u make USE_PGXS=1 python_tests || status=$? -if [ $status -ne 0 ]; then exit $status; fi - set -u -# run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? +# list cores and exit if we failed +ls "$CORE_DIR" if [ $status -ne 0 ]; then exit $status; fi +# run cmocka tests (using CFLAGS_SL for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || exit $? 
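+# note: any cmocka failure aborts the script immediately via `exit $?`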
+ # remove useless gcov files rm -f tests/cmocka/*.gcno rm -f tests/cmocka/*.gcda @@ -92,6 +78,8 @@ rm -f tests/cmocka/*.gcda gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h # send coverage stats to Coveralls +set +u bash <(curl -s https://codecov.io/bash) +set -u exit $status From 21330156691f4d3f680a0a000b7d8191bbd2a887 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 5 Jul 2018 18:10:15 +0300 Subject: [PATCH 282/528] update test environment (Docker etc) --- .dockerignore | 5 ++ .travis.yml | 30 ++++---- Dockerfile.tmpl | 54 ++++++++------ make_images.py | 139 ----------------------------------- mk_dockerfile.sh | 16 ++++ run_tests.sh | 187 ++++++++++++++++++++++++++++++++--------------- 6 files changed, 200 insertions(+), 231 deletions(-) create mode 100644 .dockerignore delete mode 100755 make_images.py create mode 100755 mk_dockerfile.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..ce3c9e6f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +*.gcno +*.gcda +*.gcov +*.so +*.o diff --git a/.travis.yml b/.travis.yml index 265ac48d..051401f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,4 @@ -os: - - linux - sudo: required -dist: trusty language: c @@ -10,18 +6,26 @@ services: - docker install: - - echo "FROM ${DOCKER_IMAGE}" > Dockerfile + - ./mk_dockerfile.sh - docker-compose build script: - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests +notifications: + email: + on_success: change + on_failure: always + env: - - DOCKER_IMAGE=pathman/pg95_clang_check_code - - DOCKER_IMAGE=pathman/pg95_pathman_tests - - DOCKER_IMAGE=pathman/pg96_clang_check_code - - DOCKER_IMAGE=pathman/pg96_pathman_tests - - DOCKER_IMAGE=pathman/pg10_clang_check_code - - DOCKER_IMAGE=pathman/pg10_pathman_tests - - DOCKER_IMAGE=pathman/pg10_ca_clang_check_code - - DOCKER_IMAGE=pathman/pg10_ca_pathman_tests + - PG_VERSION=10 LEVEL=nightmare + - PG_VERSION=10 LEVEL=hardcore + - PG_VERSION=10 + - PG_VERSION=9.6 LEVEL=hardcore + - PG_VERSION=9.6 + - PG_VERSION=9.5 LEVEL=hardcore + - PG_VERSION=9.5 + +matrix: + allow_failures: + - env: PG_VERSION=10 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 5ceaeb99..021a2850 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,30 +1,40 @@ -FROM ${PG_IMAGE} +FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 PGDATA=/pg/data +# Install dependencies +RUN apk add --no-cache \ + openssl curl \ + cmocka-dev \ + perl perl-ipc-run \ + python3 python3-dev py-virtualenv \ + coreutils linux-headers \ + make musl-dev gcc bison flex \ + zlib-dev libedit-dev \ + clang clang-analyzer; + +# Install fresh valgrind +RUN apk add valgrind \ + --update-cache \ + --repository http://dl-3.alpinelinux.org/alpine/edge/main; -RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ - echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add clang-analyzer make musl-dev gcc; \ - fi +# Environment +ENV LANG=C.UTF-8 PGDATA=/pg/data -RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ - apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community; \ - fi +# Make directories +RUN mkdir -p ${PGDATA} && \ + mkdir -p /pg/testdir -RUN if [ "${CHECK_CODE}" = "false" ] ; then \ - echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add curl python3 python3-dev gcc make musl-dev 
cmocka-dev linux-headers;\ - pip3 install virtualenv;\ - fi +# Add data to test dir +ADD . /pg/testdir -RUN mkdir -p /pg/data && \ - mkdir /pg/pg_pathman && \ - chown postgres:postgres ${PGDATA} && \ +# Grant privileges +RUN chown -R postgres:postgres ${PGDATA} && \ + chown -R postgres:postgres /pg/testdir && \ chmod a+rwx /usr/local/lib/postgresql && \ chmod a+rwx /usr/local/share/postgresql/extension -ONBUILD ADD . /pg/pg_pathman -ONBUILD WORKDIR /pg/pg_pathman -ONBUILD RUN chmod -R go+rwX /pg/pg_pathman -ONBUILD USER postgres -ONBUILD ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +COPY run_tests.sh /run.sh +RUN chmod 755 /run.sh + +USER postgres +WORKDIR /pg/testdir +ENTRYPOINT LEVEL=${LEVEL} /run.sh diff --git a/make_images.py b/make_images.py deleted file mode 100755 index 9c9b6e43..00000000 --- a/make_images.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python3 - -import os -import subprocess -import getpass -import requests -import tempfile - -from urllib.parse import urljoin -from urllib.request import urlopen - -DOCKER_ID = 'pathman' -ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/10/alpine/' -ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' - -''' -How to create this patch: - * put `import ipdb; ipdb.set_trace()` in make_alpine_image, just before `open(patch_name)..` - * run the script - * in temporary folder run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1` - * uncomment --enable-debug, add --enable-cassert, add `CFLAGS="-g3 -O0"` before ./configure - * run `diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch` - * contents of cassert.patch put to variable below - * change Dockerfile.1 to Dockerfile in text, change `\` symbols to `\\` -''' -ALPINE_PATCH = b''' ---- Dockerfile 2017-09-25 12:01:24.597813507 +0300 -+++ Dockerfile 2017-09-25 12:09:06.104059704 +0300 -@@ -79,15 +79,15 @@ - && wget -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' \\ - # configure options taken from: - # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 -- && ./configure \\ -+ && CFLAGS="-g3 -O0" ./configure \\ - --build="$gnuArch" \\ - # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" - # --enable-nls \\ - --enable-integer-datetimes \\ - --enable-thread-safety \\ - --enable-tap-tests \\ --# skip debugging info -- we want tiny size instead --# --enable-debug \\ -+ --enable-debug \\ -+ --enable-cassert \\ - --disable-rpath \\ - --with-uuid=e2fs \\ - --with-gnu-ld \\ -''' -CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID - -def make_alpine_image(image_name): - dockerfile = urlopen(urljoin(ALPINE_BASE_URL, 'Dockerfile')).read() - entrypoint_sh = urlopen(urljoin(ALPINE_BASE_URL, ALPINE_ENTRYPOINT)).read() - - with tempfile.TemporaryDirectory() as tmpdir: - print("Creating build in %s" % tmpdir) - patch_name = os.path.join(tmpdir, "cassert.patch") - - with open(os.path.join(tmpdir, 'Dockerfile'), 'w') as f: - f.write(dockerfile.decode()) - - with open(os.path.join(tmpdir, ALPINE_ENTRYPOINT), 'w') as f: - f.write(entrypoint_sh.decode()) - - with open(patch_name, 'w') as f: - f.write(ALPINE_PATCH.decode()) - - with open(patch_name, 'r') as f: - p = subprocess.Popen(["patch", "-p0"], cwd=tmpdir, stdin=subprocess.PIPE) - p.communicate(str.encode(f.read())) - print("patch applied") - 
subprocess.check_output(["docker", "build", ".", '-t', image_name], cwd=tmpdir) - print("build ok: ", image_name) - subprocess.check_output(['docker', 'push', image_name], - stderr=subprocess.STDOUT) - print("upload ok:", image_name) - -make_alpine_image(CUSTOM_IMAGE_NAME) - -pg_containers = [ - ('pg95', 'postgres:9.5-alpine'), - ('pg96', 'postgres:9.6-alpine'), - ('pg10', 'postgres:10-alpine'), - ('pg10_ca', CUSTOM_IMAGE_NAME), -] - -image_types = { - 'clang_check_code': { - 'CHECK_CODE': 'clang', - }, - 'cppcheck': { - 'CHECK_CODE': 'cppcheck', - }, - 'pathman_tests': { - 'CHECK_CODE': 'false', - } -} - -user = input("Enter username for `docker login`: ") -password = getpass.getpass() -subprocess.check_output([ - 'docker', - 'login', - '-u', user, - '-p', password]) - -travis_conf_line = '- DOCKER_IMAGE=%s' -travis_conf = [] -print("") - -if __name__ == '__main__': - for pgname, container in pg_containers: - for key, variables in image_types.items(): - image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) - with open('Dockerfile', 'w') as out: - with open('Dockerfile.tmpl', 'r') as f: - for line in f: - line = line.replace('${PG_IMAGE}', container) - for key, value in variables.items(): - varname = '${%s}' % key - line = line.replace(varname, value) - - out.write(line) - - args = [ - 'docker', - 'build', - '-t', image_name, - '.' - ] - subprocess.check_output(args, stderr=subprocess.STDOUT) - print("build ok:", image_name) - subprocess.check_output(['docker', 'push', image_name], - stderr=subprocess.STDOUT) - print("upload ok:", image_name) - travis_conf.append(travis_conf_line % image_name) - -print("\ntravis configuration") -print('\n'.join(travis_conf)) diff --git a/mk_dockerfile.sh b/mk_dockerfile.sh new file mode 100755 index 00000000..f15433c4 --- /dev/null +++ b/mk_dockerfile.sh @@ -0,0 +1,16 @@ +if [ -z ${PG_VERSION+x} ]; then + echo PG_VERSION is not set! 
+ exit 1
+fi
+
+if [ -z ${LEVEL+x} ]; then
+ LEVEL=scan-build
+fi
+
+echo PG_VERSION=${PG_VERSION}
+echo LEVEL=${LEVEL}
+
+sed \
+ -e 's/${PG_VERSION}/'${PG_VERSION}/g \
+ -e 's/${LEVEL}/'${LEVEL}/g \
+ Dockerfile.tmpl > Dockerfile
diff --git a/run_tests.sh b/run_tests.sh
index 2dbcfd0c..a11be8f4 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -1,85 +1,158 @@
-#!/bin/bash
-
-# This is a main testing script for:
-# * regression tests
-# * testgres-based tests
-# * cmocka-based tests
-# Copyright (c) 2017, Postgres Professional
+#!/usr/bin/env bash
+
+#
+# Copyright (c) 2018, Postgres Professional
+#
+# supported levels:
+# * standard
+# * scan-build
+# * hardcore
+# * nightmare
+#
 
 set -ux
+status=0
 
-echo CHECK_CODE=$CHECK_CODE
-echo PG_VERSION=$(pg_config --version)
+# global exports
+export PGPORT=55435
+export VIRTUAL_ENV_DISABLE_PROMPT=1
 
-status=0
+# rebuild PostgreSQL with cassert + valgrind support
+if [ "$LEVEL" = "hardcore" ] || \
+ [ "$LEVEL" = "nightmare" ]; then
+
+ set -e
+
+ CUSTOM_PG_BIN=$PWD/pg_bin
+ CUSTOM_PG_SRC=$PWD/postgresql
+
+ # here PG_VERSION is provided by postgres:X-alpine docker image
+ curl "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" -o postgresql.tar.bz2
+ echo "$PG_SHA256 *postgresql.tar.bz2" | sha256sum -c -
+
+ mkdir $CUSTOM_PG_SRC
 
-# change relevant core dump settings
-CORE_DIR=/tmp/cores
-ulimit -c unlimited -S
-mkdir "$CORE_DIR"
-echo "$CORE_DIR/%e-%s-%p.core" | sudo tee /proc/sys/kernel/core_pattern
+ tar \
+ --extract \
+ --file postgresql.tar.bz2 \
+ --directory $CUSTOM_PG_SRC \
+ --strip-components 1
 
-# perform code analysis if necessary
-if [ "$CHECK_CODE" = "clang" ]; then
- scan-build --status-bugs make USE_PGXS=1 || status=$?
- exit $status
+ cd $CUSTOM_PG_SRC
+
+ # enable Valgrind support
+ sed -i.bak "s/\/\* #define USE_VALGRIND \*\//#define USE_VALGRIND/g" src/include/pg_config_manual.h
+
+ # enable additional options
+ ./configure \
+ CFLAGS='-O0 -ggdb3 -fno-omit-frame-pointer' \
+ --enable-cassert \
+ --prefix=$CUSTOM_PG_BIN \
+ --quiet
+
+ time make -s -j$(nproc) && make -s install
+
+ # override default PostgreSQL instance
+ export PATH=$CUSTOM_PG_BIN/bin:$PATH
+ export LD_LIBRARY_PATH=$CUSTOM_PG_BIN/lib
+
+ # show pg_config path (just in case)
+ which pg_config
+
+ cd -
+
+ set +e
fi
 
-# we need testgres for pathman tests
-virtualenv env
-export VIRTUAL_ENV_DISABLE_PROMPT=1
-source env/bin/activate
-pip install testgres
-pip freeze | grep testgres
+# show pg_config just in case
+pg_config
+
+# perform code checks if asked to
+if [ "$LEVEL" = "scan-build" ] || \
+ [ "$LEVEL" = "hardcore" ] || \
+ [ "$LEVEL" = "nightmare" ]; then
+
+ # perform static analysis
+ scan-build --status-bugs make USE_PGXS=1 || status=$?
+
+ # something's wrong, exit now!
+ if [ $status -ne 0 ]; then exit 1; fi + + # don't forget to "make clean" + make USE_PGXS=1 clean +fi -# initialize database -initdb -# build pg_pathman (using PG_CPPFLAGS and SHLIB_LINK for gcov) -set -e -make USE_PGXS=1 clean +# build and install extension (using PG_CPPFLAGS and SHLIB_LINK for gcov) make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" make USE_PGXS=1 install -set +e -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf -echo "port = 55435" >> $PGDATA/postgresql.conf -pg_ctl start -l /tmp/postgres.log -w || cat /tmp/postgres.log +# initialize database +initdb -D $PGDATA + +# change PG's config +echo "port = $PGPORT" >> $PGDATA/postgresql.conf +cat conf.add >> $PGDATA/postgresql.conf + +# restart cluster 'test' +if [ "$LEVEL" = "nightmare" ]; then + ls $CUSTOM_PG_BIN/bin + + valgrind \ + --tool=memcheck \ + --leak-check=no \ + --time-stamp=yes \ + --track-origins=yes \ + --trace-children=yes \ + --gen-suppressions=all \ + --suppressions=$CUSTOM_PG_SRC/src/tools/valgrind.supp \ + --suppressions=$PWD/valgrind.supp \ + --log-file=/tmp/valgrind-%p.log \ + pg_ctl start -l /tmp/postgres.log -w || status=$? +else + pg_ctl start -l /tmp/postgres.log -w || status=$? +fi + +# something's wrong, exit now! +if [ $status -ne 0 ]; then cat /tmp/postgres.log; exit 1; fi # run regression tests export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) -PGPORT=55435 make USE_PGXS=1 installcheck || status=$? +make USE_PGXS=1 installcheck || status=$? # show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - -# list cores and exit if we failed -ls "$CORE_DIR" -if [ $status -ne 0 ]; then exit $status; fi +if [ -f regression.diffs ]; then cat regression.diffs; fi # run python tests -set +u +set +x +virtualenv /tmp/env && source /tmp/env/bin/activate && pip install testgres make USE_PGXS=1 python_tests || status=$? -set -u - -# list cores and exit if we failed -ls "$CORE_DIR" -if [ $status -ne 0 ]; then exit $status; fi +deactivate +set -x + +# show Valgrind logs if necessary +if [ "$LEVEL" = "nightmare" ]; then + for f in $(find /tmp -name valgrind-*.log); do + if grep -q 'Command: [^ ]*/postgres' $f && grep -q 'ERROR SUMMARY: [1-9]' $f; then + echo "========= Contents of $f" + cat $f + status=1 + fi + done +fi # run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || exit $? +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? -# remove useless gcov files -rm -f tests/cmocka/*.gcno -rm -f tests/cmocka/*.gcda +# something's wrong, exit now! 
+if [ $status -ne 0 ]; then exit 1; fi # generate *.gcov files -gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h +gcov *.c *.h -# send coverage stats to Coveralls -set +u -bash <(curl -s https://codecov.io/bash) -set -u -exit $status +set +ux + + +# send coverage stats to Codecov +bash <(curl -s https://codecov.io/bash) From add3e9c265521c7c147922eabc42b9ea87663933 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 6 Jul 2018 04:00:33 +0300 Subject: [PATCH 283/528] remove dead code & fix logic in get_pathman_relation_info() --- src/relation_info.c | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index ef170b58..449636a7 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -322,7 +322,6 @@ PartRelationInfo * get_pathman_relation_info(Oid relid) { PartStatusInfo *psin; - bool refresh; /* Should always be called in transaction */ Assert(IsTransactionState()); @@ -331,24 +330,18 @@ get_pathman_relation_info(Oid relid) if (relid < FirstNormalObjectId) return NULL; - /* Create a new entry for this table if needed */ + /* Do we know anything about this relation? */ psin = pathman_cache_search_relid(status_cache, relid, HASH_FIND, NULL); - /* Should we build a new PartRelationInfo? */ - refresh = psin ? - (psin->prel && - !PrelIsFresh(psin->prel) && - PrelReferenceCount(psin->prel) == 0) : - true; - - if (refresh) + if (!psin) { PartRelationInfo *prel = NULL; ItemPointerData iptr; Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; + bool found; /* Check if PATHMAN_CONFIG table contains this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) @@ -364,21 +357,19 @@ get_pathman_relation_info(Oid relid) prel = build_pathman_relation_info(relid, values); } - /* Create a new entry for this table if needed */ - if (!psin) - { - bool found; - - psin = pathman_cache_search_relid(status_cache, - relid, HASH_ENTER, - &found); - Assert(!found); - } + /* Create a new entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); + Assert(!found); /* it shouldn't just appear out of thin air */ /* Cache fresh entry */ psin->prel = prel; } + /* Check invariants */ + Assert(!psin->prel || PrelIsFresh(psin->prel)); + #ifdef USE_RELINFO_LOGGING elog(DEBUG2, "fetching %s record for parent %u [%u]", From fbc89f677df87f18a5be4bbb283ef3c69903a646 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Jul 2018 15:24:46 +0300 Subject: [PATCH 284/528] fix Valgrind startup & config --- .travis.yml | 2 ++ run_tests.sh | 1 - src/hooks.c | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 051401f6..5b1732a6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,6 +21,7 @@ env: - PG_VERSION=10 LEVEL=nightmare - PG_VERSION=10 LEVEL=hardcore - PG_VERSION=10 + - PG_VERSION=9.6 LEVEL=nightmare - PG_VERSION=9.6 LEVEL=hardcore - PG_VERSION=9.6 - PG_VERSION=9.5 LEVEL=hardcore @@ -29,3 +30,4 @@ env: matrix: allow_failures: - env: PG_VERSION=10 LEVEL=nightmare + - env: PG_VERSION=9.6 LEVEL=nightmare diff --git a/run_tests.sh b/run_tests.sh index a11be8f4..d0581e7f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -106,7 +106,6 @@ if [ "$LEVEL" = "nightmare" ]; then --trace-children=yes \ --gen-suppressions=all \ --suppressions=$CUSTOM_PG_SRC/src/tools/valgrind.supp \ - --suppressions=$PWD/valgrind.supp \ 
--log-file=/tmp/valgrind-%p.log \ pg_ctl start -l /tmp/postgres.log -w || status=$? else diff --git a/src/hooks.c b/src/hooks.c index 4efa9d1c..1088b27a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -826,6 +826,8 @@ pathman_relcache_hook(Datum arg, Oid relid) if (relid == InvalidOid) { invalidate_pathman_status_info_cache(); + + /* FIXME: reset other caches as well */ } /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ From 13878ea0966f2f920ab2f0bc4e548ad889c52ee7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 9 Jul 2018 16:11:13 +0300 Subject: [PATCH 285/528] Fix endless loop in partition_filter.c --- src/partition_filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 87facbc0..65107759 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1215,7 +1215,7 @@ fetch_estate_mod_data(EState *estate) if (cb->func == pf_memcxt_callback) return (estate_mod_data *) cb->arg; - cb = estate_mcxt->reset_cbs->next; + cb = cb->next; } /* Have to create a new one */ From dde913b2203716acfddebd29f3d51884a602db47 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 13:31:44 +0300 Subject: [PATCH 286/528] bump lib version to 1.4.13 --- META.json | 15 ++++++++------- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/META.json b/META.json index 0cfa8dc2..a198d696 100644 --- a/META.json +++ b/META.json @@ -1,10 +1,9 @@ { "name": "pg_pathman", - "abstract": "Partitioning tool", - "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.12", + "abstract": "Fast partitioning tool for PostgreSQL", + "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", + "version": "1.4.13", "maintainer": [ - "Ildar Musin ", "Dmitry Ivanov ", "Ildus Kurbangaliev " ], @@ -19,12 +18,12 @@ "type": "git" } }, - "generated_by": "Ildar Musin", + "generated_by": "pgpro", "provides": { "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.12", + "version": "1.4.13", "abstract": "Partitioning tool" } }, @@ -35,6 +34,8 @@ "tags": [ "partitioning", "partition", - "optimization" + "optimization", + "range", + "hash" ] } diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 0b2434d4..5cda7bc5 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.12 + 1.4.13 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 8069f192..6bdccc2e 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010412 +#define CURRENT_LIB_VERSION 0x010413 void *pathman_cache_search_relid(HTAB *cache_table, From c871c0b2c96be1be62781a6cbac28bc88ebbb992 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 14:19:56 +0300 Subject: [PATCH 287/528] add pathman_rebuild_deletes test suite --- Makefile | 1 + expected/pathman_rebuild_deletes.out | 99 ++++++++++++++++++++++++++++ sql/pathman_rebuild_deletes.sql | 57 ++++++++++++++++ 3 files changed, 157 insertions(+) create mode 100644 
expected/pathman_rebuild_deletes.out create mode 100644 sql/pathman_rebuild_deletes.sql diff --git a/Makefile b/Makefile index d810185c..8fdc0cde 100644 --- a/Makefile +++ b/Makefile @@ -50,6 +50,7 @@ REGRESS = pathman_array_qual \ pathman_only \ pathman_param_upd_del \ pathman_permissions \ + pathman_rebuild_deletes \ pathman_rebuild_updates \ pathman_rowmarks \ pathman_runtime_nodes \ diff --git a/expected/pathman_rebuild_deletes.out b/expected/pathman_rebuild_deletes.out new file mode 100644 index 00000000..98e43862 --- /dev/null +++ b/expected/pathman_rebuild_deletes.out @@ -0,0 +1,99 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; +/* + * Test DELETEs on a partition with different TupleDescriptor. + */ +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); + append_range_partition +------------------------ + test_deletes.test_11 +(1 row) + +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; + QUERY PLAN +--------------------------- + Delete on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 1 | test_deletes.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; + QUERY PLAN +----------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+----+---------------------- + 101 | 10 | test_deletes.test_11 +(1 row) + +CREATE TABLE test_deletes.test_dummy (val INT4); +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Delete on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Delete on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +DROP TABLE test_deletes.test_dummy; +DROP SCHEMA test_deletes CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_deletes.sql 
b/sql/pathman_rebuild_deletes.sql new file mode 100644 index 00000000..f14bce5a --- /dev/null +++ b/sql/pathman_rebuild_deletes.sql @@ -0,0 +1,57 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; + + +/* + * Test DELETEs on a partition with different TupleDescriptor. + */ + +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; + +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); + + +VACUUM ANALYZE; + + +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + +CREATE TABLE test_deletes.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + +DROP TABLE test_deletes.test_dummy; + + + +DROP SCHEMA test_deletes CASCADE; +DROP EXTENSION pg_pathman; From 05f4bbb2a73c4651d0f35ef3f5f5a661a14bbe57 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 14:41:33 +0300 Subject: [PATCH 288/528] extend pathman_rebuild_updates test suite --- expected/pathman_rebuild_updates.out | 48 +++++++++++++++++++++++++++- sql/pathman_rebuild_updates.sql | 15 +++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index f7d59718..79a186ae 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -103,6 +103,52 @@ RETURNING test; (1 row) DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | expr | range_min | range_max | columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | 
val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 111 | 8 +(11 rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index 41d168df..3144a416 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -57,6 +57,21 @@ RETURNING test; DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; + +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + + DROP SCHEMA test_updates CASCADE; DROP EXTENSION pg_pathman; From 54a589c4f1b876bbcbbbc92eae4442445dfc786a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 14:57:44 +0300 Subject: [PATCH 289/528] remove obsolete test runner --- travis/pg-travis-test.sh | 141 --------------------------------------- 1 file changed, 141 deletions(-) delete mode 100755 travis/pg-travis-test.sh diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh deleted file mode 100755 index 97fa5ea9..00000000 --- a/travis/pg-travis-test.sh +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash - -set -eux - -sudo apt-get update - - -# required packages -apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev build-essential" -pip_packages="testgres" - -# exit code -status=0 - -# pg_config path -pg_ctl_path=/usr/lib/postgresql/$PG_VER/bin/pg_ctl -initdb_path=/usr/lib/postgresql/$PG_VER/bin/initdb -config_path=/usr/lib/postgresql/$PG_VER/bin/pg_config - - -# bug: http://www.postgresql.org/message-id/20130508192711.GA9243@msgid.df7cb.de -sudo update-alternatives --remove-all postmaster.1.gz - -# stop all existing instances (because of https://github.com/travis-ci/travis-cookbooks/pull/221) -sudo service postgresql stop -# ... 
and make sure they don't come back -echo 'exit 0' | sudo tee /etc/init.d/postgresql -sudo chmod a+x /etc/init.d/postgresql - -# install required packages -sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install -qq $apt_packages - - -# perform code analysis if necessary -if [ $CHECK_CODE = "true" ]; then - - if [ "$CC" = "clang" ]; then - sudo apt-get -y install -qq clang-$LLVM_VER - - scan-build-$LLVM_VER --status-bugs make USE_PGXS=1 PG_CONFIG=$config_path || status=$? - exit $status - - elif [ "$CC" = "gcc" ]; then - sudo apt-get -y install -qq cppcheck - - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make clean USE_PGXS=1 PG_CONFIG=$config_path -fi - - -# create cluster 'test' -CLUSTER_PATH=$(pwd)/test_cluster -$initdb_path -D $CLUSTER_PATH -U $USER -A trust - -# build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" -sudo make install USE_PGXS=1 PG_CONFIG=$config_path - -# check build -status=$? -if [ $status -ne 0 ]; then exit $status; fi - -# set permission to write postgres locks -sudo chown $USER /var/run/postgresql/ - -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -echo "shared_preload_libraries = 'pg_pathman'" >> $CLUSTER_PATH/postgresql.conf -echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf -$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log -w - -# run regression tests -PGPORT=55435 PGUSER=$USER PG_CONFIG=$config_path make installcheck USE_PGXS=1 || status=$? - -# show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - - -set +u - -# create virtual environment and activate it -virtualenv /tmp/envs/pg_pathman --python=python3 -source /tmp/envs/pg_pathman/bin/activate -type python -type pip - -# install pip packages -pip install $pip_packages - -# run python tests -make USE_PGXS=1 PG_CONFIG=$config_path python_tests || status=$? - -# deactivate virtual environment -deactivate - -set -u - - -# install cmake for cmocka -sudo apt-get -y install -qq cmake - -# build & install cmocka -CMOCKA_VER=1.1.1 -cd tests/cmocka -tar xf cmocka-$CMOCKA_VER.tar.xz -cd cmocka-$CMOCKA_VER -mkdir build && cd build -cmake .. -make && sudo make install -cd ../../../.. - -# export path to libcmocka.so -LD_LIBRARY_PATH=/usr/local/lib -export LD_LIBRARY_PATH - -# run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path PG_CPPFLAGS="-coverage" cmocka_tests || status=$? 
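As background for the cmocka steps in this (now removed) runner and in the new run_tests.sh: a cmocka test binary is an ordinary C program linked against libcmocka, and its exit status reports failures, which is what lets the scripts chain it into 'status'. A minimal sketch (illustrative only; pg_pathman's real tests live in tests/cmocka/):

    #include <stdarg.h>
    #include <stddef.h>
    #include <setjmp.h>
    #include <stdint.h>
    #include <cmocka.h>

    /* one test case: cmocka passes per-test state through 'state' */
    static void
    test_arithmetic(void **state)
    {
        (void) state;               /* unused */
        assert_int_equal(2 + 2, 4);
    }

    int
    main(void)
    {
        /* register the test(s) and run them as a group */
        const struct CMUnitTest tests[] = {
            cmocka_unit_test(test_arithmetic),
        };

        return cmocka_run_group_tests(tests, NULL, NULL);
    }

Built with 'gcc test.c -lcmocka', the binary exits non-zero if any assertion fails.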
- -# remove useless gcov files -rm -f tests/cmocka/*.gcno -rm -f tests/cmocka/*.gcda - -#generate *.gcov files -gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h - - -exit $status From dff70cc7e71ace4d2b1019c016267fc516c12867 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 16:09:54 +0300 Subject: [PATCH 290/528] move get_pathman_schema() to its siblings --- src/include/pathman.h | 1 + src/include/utils.h | 1 - src/pg_pathman.c | 51 +++++++++++++++++++++++++++++++++++++++++++ src/relation_info.c | 2 -- src/utils.c | 50 +----------------------------------------- 5 files changed, 53 insertions(+), 52 deletions(-) diff --git a/src/include/pathman.h b/src/include/pathman.h index d1ebb583..b5f9a156 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -100,6 +100,7 @@ extern Oid pathman_config_params_relid; */ Oid get_pathman_config_relid(bool invalid_is_ok); Oid get_pathman_config_params_relid(bool invalid_is_ok); +Oid get_pathman_schema(void); /* diff --git a/src/include/utils.h b/src/include/utils.h index 0697b923..1e0b87a4 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -28,7 +28,6 @@ bool match_expr_to_operand(const Node *expr, const Node *operand); /* * Misc. */ -Oid get_pathman_schema(void); List *list_reverse(List *l); /* diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a3ff2c7f..b9e4a6a4 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -22,8 +22,13 @@ #include "runtime_merge_append.h" #include "postgres.h" +#include "access/htup_details.h" #include "access/sysattr.h" +#include "access/xact.h" +#include "catalog/indexing.h" #include "catalog/pg_type.h" +#include "catalog/pg_extension.h" +#include "commands/extension.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -31,6 +36,7 @@ #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" #include "utils/datum.h" +#include "utils/fmgroids.h" #include "utils/rel.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -354,6 +360,51 @@ get_pathman_config_params_relid(bool invalid_is_ok) return pathman_config_params_relid; } +/* + * Return pg_pathman schema's Oid or InvalidOid if that's not possible. 
+ */ +Oid +get_pathman_schema(void) +{ + Oid result; + Relation rel; + SysScanDesc scandesc; + HeapTuple tuple; + ScanKeyData entry[1]; + Oid ext_oid; + + /* It's impossible to fetch pg_pathman's schema now */ + if (!IsTransactionState()) + return InvalidOid; + + ext_oid = get_extension_oid("pg_pathman", true); + if (ext_oid == InvalidOid) + return InvalidOid; /* exit if pg_pathman does not exist */ + + ScanKeyInit(&entry[0], + ObjectIdAttributeNumber, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(ext_oid)); + + rel = heap_open(ExtensionRelationId, AccessShareLock); + scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, + NULL, 1, entry); + + tuple = systable_getnext(scandesc); + + /* We assume that there can be at most one matching tuple */ + if (HeapTupleIsValid(tuple)) + result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; + else + result = InvalidOid; + + systable_endscan(scandesc); + + heap_close(rel, AccessShareLock); + + return result; +} + /* diff --git a/src/relation_info.c b/src/relation_info.c index 449636a7..999608ec 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1571,7 +1571,5 @@ finish_delayed_invalidation(void) return; } } - - } } diff --git a/src/utils.c b/src/utils.c index 05f68acf..cbec24c8 100644 --- a/src/utils.c +++ b/src/utils.c @@ -16,18 +16,15 @@ #include "access/htup_details.h" #include "access/nbtree.h" #include "access/sysattr.h" -#include "access/xact.h" -#include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" -#include "catalog/pg_extension.h" #include "catalog/pg_operator.h" #include "catalog/pg_type.h" -#include "commands/extension.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_coerce.h" #include "parser/parse_oper.h" +#include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" @@ -136,51 +133,6 @@ match_expr_to_operand(const Node *expr, const Node *operand) } -/* - * Return pg_pathman schema's Oid or InvalidOid if that's not possible. 
- */ -Oid -get_pathman_schema(void) -{ - Oid result; - Relation rel; - SysScanDesc scandesc; - HeapTuple tuple; - ScanKeyData entry[1]; - Oid ext_oid; - - /* It's impossible to fetch pg_pathman's schema now */ - if (!IsTransactionState()) - return InvalidOid; - - ext_oid = get_extension_oid("pg_pathman", true); - if (ext_oid == InvalidOid) - return InvalidOid; /* exit if pg_pathman does not exist */ - - ScanKeyInit(&entry[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ext_oid)); - - rel = heap_open(ExtensionRelationId, AccessShareLock); - scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, - NULL, 1, entry); - - tuple = systable_getnext(scandesc); - - /* We assume that there can be at most one matching tuple */ - if (HeapTupleIsValid(tuple)) - result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; - else - result = InvalidOid; - - systable_endscan(scandesc); - - heap_close(rel, AccessShareLock); - - return result; -} - List * list_reverse(List *l) { From b3eac64837654bec0fd52b71c776a54a67cfcb8d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 18:24:05 +0300 Subject: [PATCH 291/528] fix builds on 9.5 and 9.6 --- src/utils.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/utils.c b/src/utils.c index cbec24c8..ddf10bae 100644 --- a/src/utils.c +++ b/src/utils.c @@ -26,6 +26,7 @@ #include "parser/parse_oper.h" #include "utils/array.h" #include "utils/builtins.h" +#include "utils/datetime.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/syscache.h" From b609c5f1be07c6fac93cf1f6d9243f3d9232bba6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 18:33:06 +0300 Subject: [PATCH 292/528] Fix nasty bug in select_partition_for_insert(). Many thanks to @arssher. 
--- src/include/partition_filter.h | 2 +- src/partition_filter.c | 22 ++++++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 0940a59f..3a3e848e 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -167,7 +167,7 @@ void fini_result_parts_storage(ResultPartsStorage *parts_storage); ResultRelInfoHolder * scan_result_parts_storage(ResultPartsStorage *storage, Oid partid); /* Refresh PartRelationInfo in storage */ -void refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); +PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); diff --git a/src/partition_filter.c b/src/partition_filter.c index 65107759..4d588914 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -381,7 +381,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) } /* Refresh PartRelationInfo for the partition in storage */ -void +PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) { if (partid == PrelParentRelid(parts_storage->prel)) @@ -389,6 +389,8 @@ refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) close_pathman_relation_info(parts_storage->prel); parts_storage->prel = get_pathman_relation_info(partid); shout_if_prel_is_invalid(partid, parts_storage->prel, PT_ANY); + + return parts_storage->prel; } else { @@ -398,12 +400,14 @@ refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) (const void *) &partid, HASH_FIND, NULL); - if (rri_holder && rri_holder->prel) - { - close_pathman_relation_info(rri_holder->prel); - rri_holder->prel = get_pathman_relation_info(partid); - shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); - } + /* We must have entry (since we got 'prel' from it) */ + Assert(rri_holder && rri_holder->prel); + + close_pathman_relation_info(rri_holder->prel); + rri_holder->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); + + return rri_holder->prel; } } @@ -543,7 +547,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) { /* Try building a new 'prel' for this relation */ - refresh_result_parts_storage(parts_storage, parent_relid); + prel = refresh_result_parts_storage(parts_storage, parent_relid); } /* This partition is a parent itself */ @@ -557,6 +561,8 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, /* Repeat with a new dispatch */ result = NULL; } + + Assert(prel); } /* Loop until we get some result */ while (result == NULL); From d0d128d79b5cb499a679f5a212ef99cbb75e83ee Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Jul 2018 18:02:39 +0300 Subject: [PATCH 293/528] change update test suite --- src/partition_filter.c | 1 + ...thman_objects => dump_pathman_objects.sql} | 17 +------- tests/update/get_sql_diff | 39 +++++++++++++++++++ 3 files changed, 41 insertions(+), 16 deletions(-) rename tests/update/{dump_pathman_objects => dump_pathman_objects.sql} (68%) mode change 100755 => 100644 create mode 100755 tests/update/get_sql_diff diff --git a/src/partition_filter.c b/src/partition_filter.c index 4d588914..1e5c2db1 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1173,6 +1173,7 @@ append_rri_to_estate(EState *estate, 
ResultRelInfo *rri)
 {
 ResultRelInfo *rri_array = estate->es_result_relations;
 
+
 /* HACK: we can't repalloc or free previous array (there might be users) */
 result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1;
 estate->es_result_relations = palloc(result_rels_allocated *
 sizeof(ResultRelInfo));
diff --git a/tests/update/dump_pathman_objects b/tests/update/dump_pathman_objects.sql
old mode 100755
new mode 100644
similarity index 68%
rename from tests/update/dump_pathman_objects
rename to tests/update/dump_pathman_objects.sql
index fff1ed17..e1a632ca
--- a/tests/update/dump_pathman_objects
+++ b/tests/update/dump_pathman_objects.sql
@@ -1,17 +1,4 @@
-#!/usr/bin/bash
-
-
-rndstr=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13 ; echo '')
-bindir=$($PG_CONFIG --bindir)
-dbname=$1
-flname=pathman_objects_$rndstr.txt
-
-# show file name
-echo $flname
-
-$bindir/psql $dbname << EOF
-
-\o $flname
+CREATE EXTENSION IF NOT EXISTS pg_pathman;
 
 SELECT pg_get_functiondef(objid)
 FROM pg_catalog.pg_depend JOIN pg_proc ON pg_proc.oid = pg_depend.objid
@@ -27,5 +14,3 @@ ORDER BY objid::regprocedure::TEXT ASC;
 \d+ pathman_partition_list
 \d+ pathman_cache_stats
 \d+ pathman_concurrent_part_tasks
-
-EOF
diff --git a/tests/update/get_sql_diff b/tests/update/get_sql_diff
new file mode 100755
index 00000000..876717a8
--- /dev/null
+++ b/tests/update/get_sql_diff
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PG_VER=$1
+WORK_DIR=/tmp/pg_pathman
+BRANCH_1=$2
+BRANCH_2=$3
+
+
+if [ -z "$PG_VER" ]; then
+	PG_VER=10
+fi
+
+if [ -z "$BRANCH_1" ]; then
+	BRANCH_1=master
+fi
+
+if [ -z "$BRANCH_2" ]; then
+	BRANCH_2=$(git tag | sort -V | tail -1)
+fi
+
+
+printf "PG:\\t$PG_VER\\n"
+printf "BRANCH_1:\\t$BRANCH_1\\n"
+printf "BRANCH_2:\\t$BRANCH_2\\n"
+
+
+cp -R "$(dirname $0)" "$WORK_DIR"
+
+git checkout "$BRANCH_1"
+
+norsu pgxs "$PG_VER" -- clean install
+norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_1
+
+git checkout "$BRANCH_2"
+
+norsu pgxs "$PG_VER" -- clean install
+norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_2
+
+diff -u "$WORK_DIR"/dump_1 "$WORK_DIR"/dump_2 > "$WORK_DIR"/diff
From 0dc040b6df6adefdaeed93f9233d217e445a9494 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Thu, 12 Jul 2018 13:41:26 +0300
Subject: [PATCH 294/528] fix evaluation of expressions with PARAMs in
 handle_modification_query()

---
 expected/pathman_param_upd_del.out | 58 ++++++++++++++++++++++++++++++
 sql/pathman_param_upd_del.sql | 11 ++++++
 src/planner_tree_modification.c | 13 +++----
 3 files changed, 76 insertions(+), 6 deletions(-)

diff --git a/expected/pathman_param_upd_del.out b/expected/pathman_param_upd_del.out
index 7419ad29..ad935579 100644
--- a/expected/pathman_param_upd_del.out
+++ b/expected/pathman_param_upd_del.out
@@ -68,6 +68,64 @@ EXPLAIN (COSTS OFF) EXECUTE upd(11);
 Filter: (key = 11)
 (3 rows)
 
+DEALLOCATE upd;
+PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2;
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+ QUERY PLAN
+----------------------------
+ Update on test_7
+ -> Seq Scan on test_7
+ Filter: (key = 16)
+(3 rows)
+
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+ QUERY PLAN
+----------------------------
+ Update on test_7
+ -> Seq Scan on test_7
+ Filter: (key = 16)
+(3 rows)
+
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+ QUERY PLAN
+----------------------------
+ Update on test_7
+ -> Seq Scan on test_7
+ Filter: (key = 16)
+(3 rows)
+
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+ QUERY PLAN
+----------------------------
+ Update on test_7
+ -> Seq Scan on test_7
+ Filter: (key = 16)
+(3 rows)
+
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+ QUERY PLAN
+----------------------------
+ Update on test_7
+ -> Seq Scan on test_7
+ Filter: (key = 16)
+(3 rows)
+
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+ QUERY PLAN
+----------------------------
+ Update on test_7
+ -> Seq Scan on test_7
+ Filter: (key = 16)
+(3 rows)
+
+EXPLAIN (COSTS OFF) EXECUTE upd(6);
+ QUERY PLAN
+----------------------------
+ Update on test_3
+ -> Seq Scan on test_3
+ Filter: (key = 18)
+(3 rows)
+
 DEALLOCATE upd;
 PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1;
 EXPLAIN (COSTS OFF) EXECUTE del(10);
diff --git a/sql/pathman_param_upd_del.sql b/sql/pathman_param_upd_del.sql
index 98be1179..f4e42a41 100644
--- a/sql/pathman_param_upd_del.sql
+++ b/sql/pathman_param_upd_del.sql
@@ -23,6 +23,17 @@ EXPLAIN (COSTS OFF) EXECUTE upd(11);
 DEALLOCATE upd;
 
 
+PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2;
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+EXPLAIN (COSTS OFF) EXECUTE upd(5);
+EXPLAIN (COSTS OFF) EXECUTE upd(6);
+DEALLOCATE upd;
+
+
 PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1;
 EXPLAIN (COSTS OFF) EXECUTE del(10);
 EXPLAIN (COSTS OFF) EXECUTE del(10);
diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index e5dcfe2c..35473a75 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -374,8 +374,8 @@ static void
 handle_modification_query(Query *parse, transform_query_cxt *context)
 {
 RangeTblEntry *rte;
- Expr *quals;
 Oid child;
+ Node *quals = parse->jointree->quals;
 Index result_rti = parse->resultRelation;
 ParamListInfo params = context->query_params;
@@ -390,14 +390,15 @@ handle_modification_query(Query *parse, transform_query_cxt *context)
 if (!rte->inh) return;
 
- quals = (Expr *) eval_const_expressions(NULL, parse->jointree->quals);
-
 /* Check if we can replace PARAMs with CONSTs */
- if (params && clause_contains_params((Node *) quals))
- quals = (Expr *) eval_extern_params_mutator((Node *) quals, params);
+ if (params && clause_contains_params(quals))
+ quals = eval_extern_params_mutator(quals, params);
+
+ /* Evaluate constant expressions */
+ quals = eval_const_expressions(NULL, quals);
 
 /* Parse syntax tree and extract deepest partition if possible */
- child = find_deepest_partition(rte->relid, result_rti, quals);
+ child = find_deepest_partition(rte->relid, result_rti, (Expr *) quals);
 
 /* Substitute parent table with partition */
 if (OidIsValid(child))
From adfaa72a1cf91e61056006f00ce599cf37be730a Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Thu, 12 Jul 2018 14:15:08 +0300
Subject: [PATCH 295/528] attempt to fix tests (also disable Valgrind)

---
 .travis.yml | 2 --
 Dockerfile.tmpl | 2 +-
 tests/python/partitioning_test.py | 1 -
 3 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 5b1732a6..db2eebc9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,10 +18,8 @@ notifications:
 on_failure: always
 
 env:
- - PG_VERSION=10 LEVEL=nightmare
 - PG_VERSION=10 LEVEL=hardcore
 - PG_VERSION=10
- - PG_VERSION=9.6 LEVEL=nightmare
 - PG_VERSION=9.6 LEVEL=hardcore
 - PG_VERSION=9.6
 - PG_VERSION=9.5 LEVEL=hardcore
diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl
index 021a2850..85b159cf 100644
--- a/Dockerfile.tmpl
+++ 
b/Dockerfile.tmpl @@ -5,7 +5,7 @@ RUN apk add --no-cache \ openssl curl \ cmocka-dev \ perl perl-ipc-run \ - python3 python3-dev py-virtualenv \ + python3 python3-dev py3-virtualenv \ coreutils linux-headers \ make musl-dev gcc bison flex \ zlib-dev libedit-dev \ diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 12475b9e..3b889405 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -19,7 +19,6 @@ from distutils.version import LooseVersion from testgres import get_new_node, get_pg_version -from testgres.utils import pg_version_ge # set setup base logging config, it can be turned on by `use_logging` # parameter on node setup From 389c8076b0c403ac1d0a027f3823dac559136525 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 5 Jul 2018 17:41:16 +0300 Subject: [PATCH 296/528] Adapt pg_pathman for PG 11. I kinda lost interest to exorcise a couple of tests further in attempts to make them pass on all supported versions and just added copies. These are * pathman_expressions now differs because planner converts ROW(Const, Const) to just Const of record type. * Same with pathman_rebuild_updates. I have removed inclusion of partition_filter.h in pg_compat.h in 9.5 as it created circular dependency hell. I think it is not worthwhile to fight with it since the only thing actually needed was error message, which is used in this single place. Small typo fix in partitioning_test.py: con2.begin instead of con1.begin. Finally, run python tests with --failfast and --verbose options. --- expected/pathman_expressions.out | 36 +- expected/pathman_expressions_1.out | 436 +++++++++++++++++++++++++ expected/pathman_permissions.out | 32 +- expected/pathman_rebuild_updates.out | 12 +- expected/pathman_rebuild_updates_1.out | 114 +++++++ sql/pathman_expressions.sql | 7 + sql/pathman_permissions.sql | 28 +- sql/pathman_rebuild_updates.sql | 7 + src/compat/pg_compat.c | 9 + src/hooks.c | 15 +- src/include/compat/pg_compat.h | 110 +++++-- src/include/hooks.h | 2 +- src/include/partition_filter.h | 2 +- src/include/relation_info.h | 4 +- src/init.c | 2 + src/nodes_common.c | 6 +- src/partition_filter.c | 8 +- src/partition_router.c | 12 +- src/pathman_workers.c | 8 +- src/pl_funcs.c | 2 + src/pl_range_funcs.c | 2 + src/planner_tree_modification.c | 15 +- src/relation_info.c | 11 +- src/utility_stmt_hooking.c | 4 +- tests/python/Makefile | 2 +- tests/python/partitioning_test.py | 2 +- 26 files changed, 799 insertions(+), 89 deletions(-) create mode 100644 expected/pathman_expressions_1.out create mode 100644 expected/pathman_rebuild_updates_1.out diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 685ca2d3..66f931e3 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -1,3 +1,9 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -160,42 +166,38 @@ SELECT *, tableoid::REGCLASS FROM test_exprs.composite; (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN 
+------------------------------------------------------------------------------------ Append -> Seq Scan on composite_1 -> Seq Scan on composite_2 -> Seq Scan on composite_3 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; - QUERY PLAN ------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------- Append -> Seq Scan on composite_1 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_2 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_3 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_4 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) (9 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------- Append -> Seq Scan on composite_1 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -> Seq Scan on composite_2 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -> Seq Scan on composite_3 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) - -> Seq Scan on composite_4 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -(9 rows) + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) DROP TABLE test_exprs.composite CASCADE; NOTICE: drop cascades to 5 other objects diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out new file mode 100644 index 00000000..893bcd21 --- /dev/null +++ b/expected/pathman_expressions_1.out @@ -0,0 +1,436 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT 
add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on canon_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN 
+---------------------------------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_2 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_4 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) +(9 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from 
pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(3 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ 
+EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(3 rows) + +DROP SCHEMA test_exprs CASCADE; +NOTICE: drop cascades to 24 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index e329a9ec..a9e68be4 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -12,15 +12,23 @@ CREATE TABLE permissions.user1_table(id serial, a int); INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -ERROR: permission denied for relation user1_table +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Grant SELECT to user2 */ SET ROLE user1; GRANT SELECT ON permissions.user1_table TO user2; /* Should fail (don't own parent) */ SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -ERROR: only the owner or superuser can change partitioning configuration of table "user1_table" +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Should be ok */ SET ROLE user1; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); @@ -58,8 +66,12 @@ WHERE partrel = 'permissions.user1_table'::regclass; WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" /* No rights to insert, should fail */ SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); -ERROR: permission denied for relation user1_table +DO $$ +BEGIN + INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* No rights to create partitions (need INSERT privilege) */ SET ROLE user2; SELECT prepend_range_partition('permissions.user1_table'); @@ -116,8 +128,12 @@ ORDER BY relname; /* we also check ACL for "user1_table_2" */ (3 rows) /* Try to drop partition, should fail */ -SELECT drop_range_partition('permissions.user1_table_4'); -ERROR: must be owner of relation user1_table_4 +DO $$ +BEGIN + SELECT 
drop_range_partition('permissions.user1_table_4');
+EXCEPTION
+	WHEN insufficient_privilege THEN
+END$$;
 /* Disable automatic partition creation */
 SET ROLE user1;
 SELECT set_auto('permissions.user1_table', false);
diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out
index 79a186ae..eb078303 100644
--- a/expected/pathman_rebuild_updates.out
+++ b/expected/pathman_rebuild_updates.out
@@ -1,3 +1,9 @@
+/*
+ * -------------------------------------------
+ *  NOTE: This test behaves differently on < 11 because planner now turns
+ *  Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ */
 \set VERBOSITY terse
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
@@ -86,11 +92,11 @@ RETURNING t1.*, t1.tableoid::REGCLASS;
 EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0
 WHERE val = 101 AND test >= (100, 8)
 RETURNING *, tableoid::REGCLASS;
- QUERY PLAN
------------------------------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
  Update on test_11
    ->  Seq Scan on test_11
-         Filter: (((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101))
+         Filter: (((test_11.*)::test_updates.test >= '(100,8)'::record) AND (val = 101))
 (3 rows)
 
 /* execute this one */
diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out
new file mode 100644
index 00000000..cf0fc1dc
--- /dev/null
+++ b/expected/pathman_rebuild_updates_1.out
@@ -0,0 +1,114 @@
+/*
+ * -------------------------------------------
+ *  NOTE: This test behaves differently on < 11 because planner now turns
+ *  Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ */
+\set VERBOSITY terse
+SET search_path = 'public';
+CREATE EXTENSION pg_pathman;
+CREATE SCHEMA test_updates;
+/*
+ * Test UPDATEs on a partition with different TupleDescriptor.
+ */
+/* create partitioned table */
+CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8);
+INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i;
+SELECT create_range_partitions('test_updates.test', 'val', 1, 10);
+ create_range_partitions
+-------------------------
+ 10
+(1 row)
+
+/* drop column 'a' */
+ALTER TABLE test_updates.test DROP COLUMN a;
+/* append new partition */
+SELECT append_range_partition('test_updates.test');
+ append_range_partition
+------------------------
+ test_updates.test_11
+(1 row)
+
+INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10);
+VACUUM ANALYZE;
+/* tuple descs are the same */
+EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1;
+ QUERY PLAN
+---------------------------
+ Update on test_1
+   ->  Seq Scan on test_1
+         Filter: (val = 1)
+(3 rows)
+
+UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS;
+ val | b | tableoid
+-----+---+---------------------
+ 1   | 0 | test_updates.test_1
+(1 row)
+
+/* tuple descs are different */
+EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101;
+ QUERY PLAN
+-----------------------------
+ Update on test_11
+   ->  Seq Scan on test_11
+         Filter: (val = 101)
+(3 rows)
+
+UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS;
+ val | b | tableoid
+-----+---+----------------------
+ 101 | 0 | test_updates.test_11
+(1 row)
+
+CREATE TABLE test_updates.test_dummy (val INT4);
+EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1
+WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy)
+RETURNING *, tableoid::REGCLASS;
+ QUERY PLAN
+------------------------------------
+ Update on test_11
+   ->  Nested Loop Semi Join
+         ->  Seq Scan on test_11
+               Filter: (val = 101)
+         ->  Seq Scan on test_dummy
+               Filter: (val = 101)
+(6 rows)
+
+EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0
+FROM test_updates.test_dummy t2
+WHERE t1.val = 101 AND t1.val = t2.val
+RETURNING t1.*, t1.tableoid::REGCLASS;
+ QUERY PLAN
+---------------------------------------
+ Update on test_11 t1
+   ->  Nested Loop
+         ->  Seq Scan on test_11 t1
+               Filter: (val = 101)
+         ->  Seq Scan on test_dummy t2
+               Filter: (val = 101)
+(6 rows)
+
+EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0
+WHERE val = 101 AND test >= (100, 8)
+RETURNING *, tableoid::REGCLASS;
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Update on test_11
+   ->  Seq Scan on test_11
+         Filter: (((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101))
+(3 rows)
+
+/* execute this one */
+UPDATE test_updates.test SET b = 0
+WHERE val = 101 AND test >= (100, -1)
+RETURNING test;
+ test
+---------
+ (101,0)
+(1 row)
+
+DROP TABLE test_updates.test_dummy;
+DROP SCHEMA test_updates CASCADE;
+NOTICE: drop cascades to 13 other objects
+DROP EXTENSION pg_pathman;
diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql
index 46bceafb..6149a0c2 100644
--- a/sql/pathman_expressions.sql
+++ b/sql/pathman_expressions.sql
@@ -1,3 +1,10 @@
+/*
+ * -------------------------------------------
+ *  NOTE: This test behaves differently on < 11 because planner now turns
+ *  Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ */
+
 \set VERBOSITY terse
 
 SET search_path = 'public';
diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql
index 2dd22fc0..5f66a84f 100644
--- a/sql/pathman_permissions.sql
+++ b/sql/pathman_permissions.sql
@@ -18,7 +18,12 @@ INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g
 
 /* Should fail (can't SELECT) */
 SET ROLE user2;
-SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
+DO $$
+BEGIN
+	SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
+EXCEPTION
+	WHEN insufficient_privilege THEN
+END$$;
 
 /* Grant SELECT to user2 */
 SET ROLE user1;
@@ -26,7 +31,12 @@ GRANT SELECT ON permissions.user1_table TO user2;
 
 /* Should fail (don't own parent) */
 SET ROLE user2;
-SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
+DO $$
+BEGIN
+	SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
+EXCEPTION
+	WHEN insufficient_privilege THEN
+END$$;
 
 /* Should be ok */
 SET ROLE user1;
@@ -49,7 +59,12 @@ WHERE partrel = 'permissions.user1_table'::regclass;
 
 /* No rights to insert, should fail */
 SET ROLE user2;
-INSERT INTO permissions.user1_table (id, a) VALUES (35, 0);
+DO $$
+BEGIN
+	INSERT INTO permissions.user1_table (id, a) VALUES (35, 0);
+EXCEPTION
+	WHEN insufficient_privilege THEN
+END$$;
 
 /* No rights to create partitions (need INSERT privilege) */
 SET ROLE user2;
@@ -81,7 +96,12 @@ WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list
 ORDER BY relname; /* we also check ACL for "user1_table_2" */
 
 /* Try to drop partition, should fail */
-SELECT drop_range_partition('permissions.user1_table_4');
+DO $$
+BEGIN
+	SELECT drop_range_partition('permissions.user1_table_4');
+EXCEPTION
+	WHEN insufficient_privilege THEN
+END$$;
 
 /* Disable automatic partition creation */
 SET ROLE user1;
diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql
index 3144a416..f4229d09 100644
--- a/sql/pathman_rebuild_updates.sql
+++ b/sql/pathman_rebuild_updates.sql
@@ -1,3 +1,10 @@
+/*
+ * -------------------------------------------
+ *  NOTE: This test behaves differently on < 11 because planner now turns
+ *  Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ */
+
 \set VERBOSITY terse
 
 SET search_path = 'public';
diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c
index 602102c4..0fb510ed 100644
--- a/src/compat/pg_compat.c
+++ b/src/compat/pg_compat.c
@@ -48,7 +48,12 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
 {
 	int			parallel_workers;
 
+#if PG_VERSION_NUM >= 110000
+	parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
+											   max_parallel_workers_per_gather);
+#else
 	parallel_workers = compute_parallel_worker(rel, rel->pages, -1);
+#endif
 
 	/* If any limit was set to zero, the user doesn't want a parallel scan. 
*/ if (parallel_workers <= 0) @@ -240,7 +245,11 @@ McxtStatsInternal(MemoryContext context, int level, AssertArg(MemoryContextIsValid(context)); /* Examine the context itself */ +#if PG_VERSION_NUM >= 110000 + (*context->methods->stats) (context, NULL, NULL, totals); +#else (*context->methods->stats) (context, level, false, totals); +#endif memset(&local_totals, 0, sizeof(local_totals)); diff --git a/src/hooks.c b/src/hooks.c index 1088b27a..2693cd91 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -514,13 +514,20 @@ pathman_rel_pathlist_hook(PlannerInfo *root, rel->partial_pathlist = NIL; #endif +/* Convert list to array for faster lookups */ +#if PG_VERSION_NUM >= 110000 + setup_append_rel_array(root); +#endif + /* Generate new paths using the rels we've just added */ set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); set_append_rel_size_compat(root, rel, rti); -#if PG_VERSION_NUM >= 90600 - /* consider gathering partial paths for the parent appendrel */ - generate_gather_paths(root, rel); + /* consider gathering partial paths for the parent appendrel */ +#if PG_VERSION_NUM >= 110000 + generate_gather_paths(root, rel, false); +#elif PG_VERSION_NUM >= 90600 + generate_gather_paths(root, rel); #endif /* Skip if both custom nodes are disabled */ @@ -925,7 +932,7 @@ pathman_process_utility_hook(Node *first_arg, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot change type of column \"%s\"" " of table \"%s\" partitioned by HASH", - get_attname(relation_oid, attr_number), + get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); /* Don't forget to invalidate parsed partitioning expression */ diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 9eeca190..2cc80731 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -41,32 +41,33 @@ */ /* - * calc_nestloop_required_outer() + * get_attname() */ +#if PG_VERSION_NUM >= 110000 +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum), false) +#else +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum)) +#endif + +/* + * calc_nestloop_required_outer + */ #if PG_VERSION_NUM >= 110000 -static inline Relids -calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) -{ - RelOptInfo *innerrel = inner_path->parent; - RelOptInfo *outerrel = outer_path->parent; - Relids innerrelids = innerrel->relids; - Relids outerrelids = outerrel->relids; - Relids inner_paramrels = PATH_REQ_OUTER(inner_path); - Relids outer_paramrels = PATH_REQ_OUTER(outer_path); - - return calc_nestloop_required_outer(outerrelids, outer_paramrels, - innerrelids, inner_paramrels); -} +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ + (inner)->parent->relids, PATH_REQ_OUTER(inner)) #else -#define calc_nestloop_required_outer_compat(outer_path, inner_path) \ - (calc_nestloop_required_outer((outer_path), (inner_path))) +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer), (inner)) #endif + /* * adjust_appendrel_attrs() */ - #if PG_VERSION_NUM >= 110000 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ @@ -93,17 +94,17 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltarget->exprs = (List *) \ - adjust_appendrel_attrs((root), \ - (Node *) 
(src_rel)->reltarget->exprs, \ - (appinfo)); \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + (appinfo)); \ } while (0) #elif PG_VERSION_NUM >= 90500 #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltargetlist = (List *) \ - adjust_appendrel_attrs((root), \ - (Node *) (src_rel)->reltargetlist, \ - (appinfo)); \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltargetlist, \ + (appinfo)); \ } while (0) #endif @@ -231,7 +232,17 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) /* * create_append_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 110000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#else +/* TODO */ +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 100000 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ @@ -240,7 +251,6 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL, \ false, NIL) - #endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 90600 @@ -360,14 +370,16 @@ extern void create_plain_partial_paths(PlannerInfo *root, #define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ ExecEvalExpr((expr), (econtext), (isNull)) #elif PG_VERSION_NUM >= 90500 -#include "partition_filter.h" /* Variables for ExecEvalExprCompat() */ extern Datum exprResult; extern ExprDoneCond isDone; /* Error handlers */ -static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); } +static inline void mult_result_handler() +{ + elog(ERROR, "partitioning expression should return single value"); +} #define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ ( \ @@ -727,11 +739,53 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * RegisterCustomScanMethods() */ -#if PG_VERSION_NUM < 96000 +#if PG_VERSION_NUM < 90600 #define RegisterCustomScanMethods(methods) #endif +/* + * MakeTupleTableSlot() + */ +#if PG_VERSION_NUM >= 110000 +#define MakeTupleTableSlotCompat() \ + MakeTupleTableSlot(NULL) +#else +#define MakeTupleTableSlotCompat() \ + MakeTupleTableSlot() +#endif + +/* + * ExecInitExtraTupleSlot() + */ +#if PG_VERSION_NUM >= 110000 +#define ExecInitExtraTupleSlotCompat(estate) \ + ExecInitExtraTupleSlot((estate), NULL) +#else +#define ExecInitExtraTupleSlotCompat(estate) \ + ExecInitExtraTupleSlot(estate) +#endif +/* + * BackgroundWorkerInitializeConnectionByOid() + */ +#if PG_VERSION_NUM >= 110000 +#define BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid), 0) +#else +#define BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid)) +#endif + +/* + * heap_delete() + */ +#if PG_VERSION_NUM >= 110000 +#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ + heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd), false) +#else +#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ + heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd)) +#endif /* * ------------- diff --git a/src/include/hooks.h 
b/src/include/hooks.h index 14542bc0..adf96d37 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -42,7 +42,7 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, Index rti, RangeTblEntry *rte); -void pathman_enable_assign_hook(char newval, void *extra); +void pathman_enable_assign_hook(bool newval, void *extra); PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 3a3e848e..69cdb8c8 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -29,7 +29,7 @@ #define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" -#define ERR_PART_ATTR_MULTIPLE_RESULTS "partitioning expression should return single value" +#define ERR_PART_ATTR_MULTIPLE_RESULTS #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" #define ERR_PART_DESC_CONVERT "could not convert row type for partition" diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 6c1d5435..2f37406c 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -11,10 +11,10 @@ #ifndef RELATION_INFO_H #define RELATION_INFO_H +#include "compat/pg_compat.h" #include "utils.h" -#include "postgres.h" #include "access/attnum.h" #include "access/sysattr.h" #include "fmgr.h" @@ -279,7 +279,7 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(PrelParentRelid(prel), attnum); + char *attname = get_attname_compat(PrelParentRelid(prel), attnum); columns = lappend(columns, makeString(attname)); } diff --git a/src/init.c b/src/init.c index 2994aaf8..eb3b6feb 100644 --- a/src/init.c +++ b/src/init.c @@ -25,7 +25,9 @@ #include "catalog/indexing.h" #include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" +#if PG_VERSION_NUM < 110000 #include "catalog/pg_inherits_fn.h" +#endif #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" diff --git a/src/nodes_common.c b/src/nodes_common.c index 09f1b07e..5c484cdb 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -568,8 +568,12 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, forboth (lc1, rpath->cpath.custom_paths, lc2, custom_plans) { Plan *child_plan = (Plan *) lfirst(lc2); - RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; + RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; +#if PG_VERSION_NUM >= 110000 + AppendRelInfo *appinfo = root->append_rel_array[child_rel->relid]; +#else AppendRelInfo *appinfo = find_childrel_appendrelinfo(root, child_rel); +#endif /* Replace rel's tlist with a matching one (for ExecQual()) */ if (!processed_rel_tlist) diff --git a/src/partition_filter.c b/src/partition_filter.c index 1e5c2db1..f96eb970 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -332,8 +332,12 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) CopyToResultRelInfo(ri_WithCheckOptions); CopyToResultRelInfo(ri_WithCheckOptionExprs); CopyToResultRelInfo(ri_projectReturning); +#if PG_VERSION_NUM >= 110000 + CopyToResultRelInfo(ri_onConflict); +#else CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); +#endif if (parts_storage->command_type != CMD_UPDATE) CopyToResultRelInfo(ri_junkFilter); @@ -776,7 +780,7 @@ 
partition_filter_exec(CustomScanState *node) /* Allocate new slot if needed */ if (!state->tup_convert_slot) - state->tup_convert_slot = MakeTupleTableSlot(); + state->tup_convert_slot = MakeTupleTableSlotCompat(); ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); @@ -1055,7 +1059,9 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, mtstate.ps.state = estate; mtstate.operation = CMD_INSERT; mtstate.resultRelInfo = rri; +#if PG_VERSION_NUM < 110000 mtstate.mt_onconflict = ONCONFLICT_NONE; +#endif /* Plan fake query in for FDW access to be planned as well */ elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); diff --git a/src/partition_router.c b/src/partition_router.c index a87b514f..a354fd87 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -160,7 +160,7 @@ partition_router_exec(CustomScanState *node) state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, old_rri->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlot(estate)); + ExecInitExtraTupleSlotCompat(estate)); state->junkfilter->jf_junkAttNo = ExecFindJunkAttribute(state->junkfilter, "ctid"); @@ -277,11 +277,11 @@ ExecDeleteInternal(ItemPointer tupleid, { /* delete the tuple */ ldelete: - result = heap_delete(resultRelationDesc, tupleid, - estate->es_output_cid, - estate->es_crosscheck_snapshot, - true /* wait for commit */ , - &hufd); + result = heap_delete_compat(resultRelationDesc, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd); switch (result) { case HeapTupleSelfUpdated: diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 86416b36..532420f3 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -384,7 +384,7 @@ bgw_main_spawn_partitions(Datum main_arg) #endif /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(args->dbid, args->userid); + BackgroundWorkerInitializeConnectionByOidCompat(args->dbid, args->userid); /* Start new transaction (syscache access etc.) */ StartTransactionCommand(); @@ -469,7 +469,7 @@ bgw_main_concurrent_part(Datum main_arg) SetAutoPartitionEnabled(false); /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(part_slot->dbid, part_slot->userid); + BackgroundWorkerInitializeConnectionByOidCompat(part_slot->dbid, part_slot->userid); /* Initialize pg_pathman's local config */ StartTransactionCommand(); @@ -483,7 +483,7 @@ bgw_main_concurrent_part(Datum main_arg) Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - bool nulls[2] = { false, false }; + char nulls[2] = { false, false }; bool rel_locked = false; @@ -568,7 +568,7 @@ bgw_main_concurrent_part(Datum main_arg) /* Extract number of processed rows */ rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull)); - Assert(tupdesc->attrs[0]->atttypid == INT8OID); /* check type */ + Assert(TupleDescAttr(tupdesc, 0)->atttypid == INT8OID); /* check type */ Assert(!isnull); /* ... 
and ofc it must not be NULL */
 	}
 
 	/* Else raise generic error */
diff --git a/src/pl_funcs.c b/src/pl_funcs.c
index fb457df1..22e6b83f 100644
--- a/src/pl_funcs.c
+++ b/src/pl_funcs.c
@@ -23,7 +23,9 @@
 #include "catalog/dependency.h"
 #include "catalog/indexing.h"
 #include "catalog/namespace.h"
+#if PG_VERSION_NUM < 110000
+#include "catalog/pg_inherits_fn.h"
+#endif
 #include "catalog/pg_type.h"
 #include "commands/tablespace.h"
 #include "commands/trigger.h"
diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c
index 7d17d407..89e8536d 100644
--- a/src/pl_range_funcs.c
+++ b/src/pl_range_funcs.c
@@ -18,7 +18,9 @@
 #include "access/xact.h"
 #include "catalog/heap.h"
 #include "catalog/namespace.h"
+#if PG_VERSION_NUM < 110000
+#include "catalog/pg_inherits_fn.h"
+#endif
 #include "catalog/pg_type.h"
 #include "commands/tablecmds.h"
 #include "executor/spi.h"
diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index 35473a75..233b8773 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -851,7 +851,20 @@ eval_extern_params_mutator(Node *node, ParamListInfo params)
 			param->paramid > 0 &&
 			param->paramid <= params->numParams)
 		{
-			ParamExternData *prm = &params->params[param->paramid - 1];
+			ParamExternData *prm;
+
+#if PG_VERSION_NUM >= 110000
+			ParamExternData prmdata;
+			if (params->paramFetch != NULL)
+				prm = params->paramFetch(params, param->paramid, false, &prmdata);
+			else
+				prm = &params->params[param->paramid - 1];
+#else
+			prm = &params->params[param->paramid - 1];
+			if (!OidIsValid(prm->ptype) && params->paramFetch != NULL)
+				(*params->paramFetch) (params, param->paramid);
+#endif
+
 			if (OidIsValid(prm->ptype))
 			{
diff --git a/src/relation_info.c b/src/relation_info.c
index 999608ec..a4c91bbe 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -20,6 +20,9 @@
 #include "catalog/catalog.h"
 #include "catalog/indexing.h"
 #include "catalog/pg_constraint.h"
+#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600
+#include "catalog/pg_constraint_fn.h"
+#endif
 #include "catalog/pg_inherits.h"
 #include "catalog/pg_type.h"
 #include "miscadmin.h"
@@ -45,7 +48,7 @@
 #include "optimizer/planmain.h"
 #endif
 
-#if PG_VERSION_NUM >= 90600
+#if PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 11000
 #include "catalog/pg_constraint_fn.h"
 #endif
@@ -402,7 +405,7 @@ build_pathman_relation_info(Oid relid, Datum *values)
 
 	/* Create a new memory context to store expression tree etc */
 	prel_mcxt = AllocSetContextCreate(PathmanParentsCacheContext,
-									  __FUNCTION__,
+									  "build_pathman_relation_info",
 									  ALLOCSET_SMALL_SIZES);
 
 	/* Create a new PartRelationInfo */
@@ -897,7 +900,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel,
 	while ((i = bms_next_member(prel->expr_atts, i)) >= 0)
 	{
 		AttrNumber	attnum = i + FirstLowInvalidHeapAttributeNumber;
-		char	   *attname = get_attname(parent_relid, attnum);
+		char	   *attname = get_attname_compat(parent_relid, attnum);
 		int			j;
 
 		Assert(attnum <= expr_natts);
@@ -1435,7 +1438,7 @@ cook_partitioning_expression(const Oid relid,
 			if (nullable)
 				ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION),
 								errmsg("column \"%s\" should be marked NOT NULL",
-									   get_attname(relid, attnum))));
+									   get_attname_compat(relid, attnum))));
 		}
 	}
diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index bd65e50f..553f7c8e 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -514,10 +514,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 						   RPS_RRI_CB(finish_rri_for_copy, NULL));
 
 	/* Set up a tuple slot too */
-	myslot = 
ExecInitExtraTupleSlot(estate); + myslot = ExecInitExtraTupleSlotCompat(estate); ExecSetSlotDescriptor(myslot, tupDesc); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate); /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); diff --git a/tests/python/Makefile b/tests/python/Makefile index bb548928..ee650ea4 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,2 +1,2 @@ partitioning_tests: - python -m unittest partitioning_test.py + python -m unittest --verbose --failfast partitioning_test.py diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 3b889405..41390d4a 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -650,7 +650,7 @@ def test_conc_part_drop_runtime_append(self): # Thread for connection #2 (it has to wait) def con2_thread(): - con1.begin() + con2.begin() con2.execute('set enable_hashjoin = f') con2.execute('set enable_mergejoin = f') From 6a60886a6b82a607baf4218dbda01174f1a3faf6 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 9 Jul 2018 19:36:19 +0300 Subject: [PATCH 297/528] Fix handling of append_rel_array. Also a bunch of other stuff noted by @funbringer. --- src/compat/pg_compat.c | 8 ++----- src/hooks.c | 26 ++++++++++++----------- src/include/compat/pg_compat.h | 39 ++++++++++++++++++++++++++++++++++ src/init.c | 6 +++--- src/nodes_common.c | 8 +++---- src/pathman_workers.c | 3 +-- src/pg_pathman.c | 4 ++++ 7 files changed, 66 insertions(+), 28 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 0fb510ed..5547231e 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -48,12 +48,8 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) { int parallel_workers; -#if PG_VERSION_NUM >= 110000 - parallel_workers = compute_parallel_worker(rel, rel->pages, -1, - max_parallel_workers_per_gather); -#else - parallel_workers = compute_parallel_worker(rel, rel->pages, -1); -#endif + /* no more than max_parallel_workers_per_gather since 11 */ + parallel_workers = compute_parallel_worker_compat(rel, rel->pages, -1); /* If any limit was set to zero, the user doesn't want a parallel scan. */ if (parallel_workers <= 0) diff --git a/src/hooks.c b/src/hooks.c index 2693cd91..26adad63 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -477,6 +477,17 @@ pathman_rel_pathlist_hook(PlannerInfo *root, memset((void *) &root->simple_rte_array[current_len], 0, irange_len * sizeof(RangeTblEntry *)); +#if PG_VERSION_NUM >= 110000 + /* Make sure append_rel_array is wide enough */ + if (root->append_rel_array == NULL) + root->append_rel_array = (AppendRelInfo **) palloc0(0); + root->append_rel_array = (AppendRelInfo **) + repalloc(root->append_rel_array, + new_len * sizeof(AppendRelInfo *)); + memset((void *) &root->append_rel_array[current_len], 0, + irange_len * sizeof(AppendRelInfo *)); +#endif + /* Don't forget to update array size! 
*/ root->simple_rel_array_size = new_len; } @@ -485,7 +496,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, parent_rel = heap_open(rte->relid, NoLock); parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - + /* Add parent if asked to */ if (prel->enable_parent) append_child_relation(root, parent_rel, parent_rowmark, @@ -514,21 +525,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, rel->partial_pathlist = NIL; #endif -/* Convert list to array for faster lookups */ -#if PG_VERSION_NUM >= 110000 - setup_append_rel_array(root); -#endif - /* Generate new paths using the rels we've just added */ set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); set_append_rel_size_compat(root, rel, rti); - /* consider gathering partial paths for the parent appendrel */ -#if PG_VERSION_NUM >= 110000 - generate_gather_paths(root, rel, false); -#elif PG_VERSION_NUM >= 90600 - generate_gather_paths(root, rel); -#endif + /* consider gathering partial paths for the parent appendrel */ + generate_gather_paths_compat(root, rel); /* Skip if both custom nodes are disabled */ if (!(pg_pathman_enable_runtimeappend || diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 2cc80731..699b152d 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -787,6 +787,45 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd)) #endif +/* + * compute_parallel_worker + */ +#if PG_VERSION_NUM >= 110000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages), \ + max_parallel_workers_per_gather) +#elif PG_VERSION_NUM >= 100000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages)) +#endif + + +/* + * generate_gather_paths + */ +#if PG_VERSION_NUM >= 110000 +#define generate_gather_paths_compat(root, rel) \ + generate_gather_paths((root), (rel), false) +#elif PG_VERSION_NUM >= 90600 +#define generate_gather_paths_compat(root, rel) \ + generate_gather_paths((rel), (heap_pages), false) +#else +#define generate_gather_paths_compat(root, rel) +#endif + + +/* + * handling appendrelinfo array + */ +#if PG_VERSION_NUM >= 110000 +#define find_childrel_appendrelinfo_compat(root, rel) \ + ((root)->append_rel_array[(rel)->relid]) +#else +#define find_childrel_appendrelinfo_compat(root, rel) \ + find_childrel_appendrelinfo((root), (rel)) +#endif + + /* * ------------- * Common code diff --git a/src/init.c b/src/init.c index eb3b6feb..327ca027 100644 --- a/src/init.c +++ b/src/init.c @@ -25,9 +25,6 @@ #include "catalog/indexing.h" #include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" -#if PG_VERSION_NUM < 110000 -#include "catalog/pg_inherits_fn.h" -#endif #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -39,6 +36,9 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif #include diff --git a/src/nodes_common.c b/src/nodes_common.c index 5c484cdb..f9f394ec 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -569,11 +569,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, { Plan *child_plan = (Plan *) lfirst(lc2); RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; -#if PG_VERSION_NUM >= 110000 - AppendRelInfo *appinfo = root->append_rel_array[child_rel->relid]; -#else - 
AppendRelInfo *appinfo = find_childrel_appendrelinfo(root, child_rel); -#endif + AppendRelInfo *appinfo; + + appinfo = find_childrel_appendrelinfo_compat(root, child_rel); /* Replace rel's tlist with a matching one (for ExecQual()) */ if (!processed_rel_tlist) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 532420f3..69f5db3b 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -483,7 +483,6 @@ bgw_main_concurrent_part(Datum main_arg) Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - char nulls[2] = { false, false }; bool rel_locked = false; @@ -557,7 +556,7 @@ bgw_main_concurrent_part(Datum main_arg) } /* Call concurrent partitioning function */ - ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); + ret = SPI_execute_with_args(sql, 2, types, vals, NULL, false, 0); if (ret == SPI_OK_SELECT) { TupleDesc tupdesc = SPI_tuptable->tupdesc; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index b9e4a6a4..588f5417 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -535,6 +535,10 @@ append_child_relation(PlannerInfo *root, /* Now append 'appinfo' to 'root->append_rel_list' */ root->append_rel_list = lappend(root->append_rel_list, appinfo); + /* And to array in >= 11, it must be big enough */ +#if PG_VERSION_NUM >= 110000 + root->append_rel_array[child_rti] = appinfo; +#endif /* Translate column privileges for this child */ if (parent_rte->relid != child_oid) From 0137d1903ae01d339ecf2d0954d560d08a666952 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 9 Jul 2018 19:46:56 +0300 Subject: [PATCH 298/528] REL10 typo fix --- src/include/compat/pg_compat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 699b152d..b4fcba7c 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -808,7 +808,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, generate_gather_paths((root), (rel), false) #elif PG_VERSION_NUM >= 90600 #define generate_gather_paths_compat(root, rel) \ - generate_gather_paths((rel), (heap_pages), false) + generate_gather_paths((root), (rel)) #else #define generate_gather_paths_compat(root, rel) #endif From 4e842566bad2b7cde040cf706dea576394b2c9e2 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 10 Jul 2018 18:21:32 +0300 Subject: [PATCH 299/528] Raise notice 'insufficient privileges', fix pg_constraint_fn inclusion. 
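
Wrapping each statement that is expected to fail in a DO block makes the test
independent of the exact wording of the server's permission-denied messages,
which varies between PostgreSQL versions; only a fixed NOTICE lands in the
expected output. A minimal sketch of the pattern (illustrative only; it uses
PERFORM, the idiomatic way to discard a result in PL/pgSQL, while the test
files below keep their original SELECT form):

    DO $$
    BEGIN
        -- statement expected to fail for the current role
        PERFORM create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
    EXCEPTION
        WHEN insufficient_privilege THEN
            RAISE NOTICE 'Insufficient privileges';
    END$$;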
---
 expected/pathman_permissions.out | 8 ++++++++
 sql/pathman_permissions.sql      | 4 ++++
 src/relation_info.c              | 6 +-----
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out
index a9e68be4..388fc2bc 100644
--- a/expected/pathman_permissions.out
+++ b/expected/pathman_permissions.out
@@ -17,7 +17,9 @@ BEGIN
 	SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
+NOTICE: Insufficient privileges
 /* Grant SELECT to user2 */
 SET ROLE user1;
 GRANT SELECT ON permissions.user1_table TO user2;
@@ -28,7 +30,9 @@ BEGIN
 	SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
+NOTICE: Insufficient privileges
 /* Should be ok */
 SET ROLE user1;
 SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
@@ -71,7 +75,9 @@ BEGIN
 	INSERT INTO permissions.user1_table (id, a) VALUES (35, 0);
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
+NOTICE: Insufficient privileges
 /* No rights to create partitions (need INSERT privilege) */
 SET ROLE user2;
 SELECT prepend_range_partition('permissions.user1_table');
@@ -133,7 +139,9 @@ BEGIN
 	SELECT drop_range_partition('permissions.user1_table_4');
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
+NOTICE: Insufficient privileges
 /* Disable automatic partition creation */
 SET ROLE user1;
 SELECT set_auto('permissions.user1_table', false);
diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql
index 5f66a84f..3a234676 100644
--- a/sql/pathman_permissions.sql
+++ b/sql/pathman_permissions.sql
@@ -23,6 +23,7 @@ BEGIN
 	SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
 
 /* Grant SELECT to user2 */
@@ -36,6 +37,7 @@ BEGIN
 	SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2);
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
 
 /* Should be ok */
@@ -64,6 +66,7 @@ BEGIN
 	INSERT INTO permissions.user1_table (id, a) VALUES (35, 0);
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
 
 /* No rights to create partitions (need INSERT privilege) */
@@ -101,6 +104,7 @@ BEGIN
 	SELECT drop_range_partition('permissions.user1_table_4');
 EXCEPTION
 	WHEN insufficient_privilege THEN
+		RAISE NOTICE 'Insufficient privileges';
 END$$;
 
 /* Disable automatic partition creation */
diff --git a/src/relation_info.c b/src/relation_info.c
index a4c91bbe..1f8a1ba1 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -20,9 +20,6 @@
 #include "catalog/catalog.h"
 #include "catalog/indexing.h"
 #include "catalog/pg_constraint.h"
-#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600
-#include "catalog/pg_constraint_fn.h"
-#endif
 #include "catalog/pg_inherits.h"
 #include "catalog/pg_type.h"
 #include "miscadmin.h"
@@ -45,8 +44,7 @@
 #if PG_VERSION_NUM < 90600
 #include "optimizer/planmain.h"
 #endif
-
-#if PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 11000
+#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600
 #include "catalog/pg_constraint_fn.h"
 #endif

From 6418761aa976726b6f74fede8854f0b0484a2c15 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Wed, 11 Jul 2018 19:18:34 +0300
Subject: [PATCH 300/528] Fix pathman_rebuild_deletes test for 11.

---
 expected/pathman_rebuild_deletes.out   |  12 ++-
 expected/pathman_rebuild_deletes_1.out | 105 +++++++++++++++++++++++++
 sql/pathman_rebuild_deletes.sql        |   7 ++
 3 files changed, 121 insertions(+), 3 deletions(-)
 create mode 100644 expected/pathman_rebuild_deletes_1.out

diff --git a/expected/pathman_rebuild_deletes.out b/expected/pathman_rebuild_deletes.out
index 98e43862..b19d700a 100644
--- a/expected/pathman_rebuild_deletes.out
+++ b/expected/pathman_rebuild_deletes.out
@@ -1,3 +1,9 @@
+/*
+ * -------------------------------------------
+ *  NOTE: This test behaves differently on < 11 because planner now turns
+ *  Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ */
 \set VERBOSITY terse
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
@@ -86,11 +92,11 @@ RETURNING t1.*, t1.tableoid::REGCLASS;
 EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test
 WHERE val = 101 AND test >= (100, 8)
 RETURNING *, tableoid::REGCLASS;
- QUERY PLAN
-----------------------------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
 Delete on test_11
   ->  Seq Scan on test_11
-        Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101))
+        Filter: (((test_11.*)::test_deletes.test >= '(100,8)'::record) AND (val = 101))
 (3 rows)
 
 DROP TABLE test_deletes.test_dummy;
diff --git a/expected/pathman_rebuild_deletes_1.out b/expected/pathman_rebuild_deletes_1.out
new file mode 100644
index 00000000..d1c4b69e
--- /dev/null
+++ b/expected/pathman_rebuild_deletes_1.out
@@ -0,0 +1,105 @@
+/*
+ * -------------------------------------------
+ *  NOTE: This test behaves differently on < 11 because planner now turns
+ *  Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ */
+\set VERBOSITY terse
+SET search_path = 'public';
+CREATE EXTENSION pg_pathman;
+CREATE SCHEMA test_deletes;
+/*
+ * Test DELETEs on a partition with different TupleDescriptor.
+ */
+/* create partitioned table */
+CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8);
+INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i;
+SELECT create_range_partitions('test_deletes.test', 'val', 1, 10);
+ create_range_partitions
+-------------------------
+ 10
+(1 row)
+
+/* drop column 'a' */
+ALTER TABLE test_deletes.test DROP COLUMN a;
+/* append new partition */
+SELECT append_range_partition('test_deletes.test');
+ append_range_partition
+------------------------
+ test_deletes.test_11
+(1 row)
+
+INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10);
+VACUUM ANALYZE;
+/* tuple descs are the same */
+EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1;
+ QUERY PLAN
+---------------------------
+ Delete on test_1
+   ->  Seq Scan on test_1
+         Filter: (val = 1)
+(3 rows)
+
+DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS;
+ val | b | tableoid
+-----+---+---------------------
+ 1   | 1 | test_deletes.test_1
+(1 row)
+
+/* tuple descs are different */
+EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101;
+ QUERY PLAN
+-----------------------------
+ Delete on test_11
+   ->  Seq Scan on test_11
+         Filter: (val = 101)
+(3 rows)
+
+DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS;
+ val | b  | tableoid
+-----+----+----------------------
+ 101 | 10 | test_deletes.test_11
+(1 row)
+
+CREATE TABLE test_deletes.test_dummy (val INT4);
+EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test
+WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy)
+RETURNING *, tableoid::REGCLASS;
+ QUERY PLAN
+------------------------------------
+ Delete on test_11
+   ->  Nested Loop Semi Join
+         ->  Seq Scan on test_11
+               Filter: (val = 101)
+         ->  Seq Scan on test_dummy
+               Filter: (val = 101)
+(6 rows)
+
+EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1
+USING test_deletes.test_dummy t2
+WHERE t1.val = 101 AND t1.val = t2.val
+RETURNING t1.*, t1.tableoid::REGCLASS;
+ QUERY PLAN
+---------------------------------------
+ Delete on test_11 t1
+   ->  Nested Loop
+         ->  Seq Scan on test_11 t1
+               Filter: (val = 101)
+         ->  Seq Scan on test_dummy t2
+               Filter: (val = 101)
+(6 rows)
+
+EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test
+WHERE val = 101 AND test >= (100, 8)
+RETURNING *, tableoid::REGCLASS;
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Delete on test_11
+   ->  Seq Scan on test_11
+         Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101))
+(3 rows)
+
+DROP TABLE test_deletes.test_dummy;
+DROP SCHEMA test_deletes CASCADE;
+NOTICE: drop cascades to 13 other objects
+DROP EXTENSION pg_pathman;
diff --git a/sql/pathman_rebuild_deletes.sql b/sql/pathman_rebuild_deletes.sql
index f14bce5a..28a09916 100644
--- a/sql/pathman_rebuild_deletes.sql
+++ b/sql/pathman_rebuild_deletes.sql
@@ -1,3 +1,10 @@
+/*
+ * -------------------------------------------
+ *  NOTE: This test behaves differently on < 11 because planner now turns
+ *  Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ */
+
 \set VERBOSITY terse
 
 SET search_path = 'public';

From 50bfb926ca32ef6fbef04c18e58de2100135aa58 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Wed, 11 Jul 2018 19:25:07 +0300
Subject: [PATCH 301/528] Fix pathman_rebuild_updates test.
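
Besides the record-type Filter differences, the alternative output now covers
cross-partition UPDATEs: with pg_pathman.enable_partitionrouter = ON, an UPDATE
that moves a row outside its partition's range is executed as a DELETE plus a
re-routed INSERT, which is visible through tableoid and may spawn a new
partition. A sketch of what the new test block exercises (the values are
illustrative; which partitions are touched depends on the ranges):

    SET pg_pathman.enable_partitionrouter = ON;

    -- 105 lies in test_11's range [101, 111); setting val = 115 moves the row
    -- into test_12, which is created on the fly if it does not exist yet
    UPDATE test_updates.test SET val = 115 WHERE val = 105
    RETURNING *, tableoid::REGCLASS;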
--- expected/pathman_rebuild_updates_1.out | 48 +++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out index cf0fc1dc..10ec256e 100644 --- a/expected/pathman_rebuild_updates_1.out +++ b/expected/pathman_rebuild_updates_1.out @@ -109,6 +109,52 @@ RETURNING test; (1 row) DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | expr | range_min | range_max | columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 111 | 8 +(11 rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; From 44a3f678da529c70b15054cf3c2c2bdcf9808cb3 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 12 Jul 2018 16:30:59 +0300 Subject: [PATCH 302/528] Purge spurious whitespace --- src/hooks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 26adad63..28e52f47 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -487,7 +487,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, memset((void *) &root->append_rel_array[current_len], 0, irange_len * sizeof(AppendRelInfo *)); #endif - + /* Don't forget to update array size! 
*/ root->simple_rel_array_size = new_len; } @@ -496,7 +496,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, parent_rel = heap_open(rte->relid, NoLock); parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - + /* Add parent if asked to */ if (prel->enable_parent) append_child_relation(root, parent_rel, parent_rowmark, From 0c38735df27e4e78496fa7eaa1c1d2dd1905ffb6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Jul 2018 17:39:38 +0300 Subject: [PATCH 303/528] minor style fixes & refactoring --- src/include/compat/pg_compat.h | 29 ++++++++++++++++++++++++++--- src/include/partition_filter.h | 1 - src/init.c | 1 + src/pl_funcs.c | 7 ++++--- src/pl_range_funcs.c | 7 ++++--- src/planner_tree_modification.c | 15 +-------------- 6 files changed, 36 insertions(+), 24 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index b4fcba7c..08298dd8 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -788,7 +788,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif /* - * compute_parallel_worker + * compute_parallel_worker() */ #if PG_VERSION_NUM >= 110000 #define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ @@ -801,7 +801,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* - * generate_gather_paths + * generate_gather_paths() */ #if PG_VERSION_NUM >= 110000 #define generate_gather_paths_compat(root, rel) \ @@ -815,7 +815,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* - * handling appendrelinfo array + * find_childrel_appendrelinfo() */ #if PG_VERSION_NUM >= 110000 #define find_childrel_appendrelinfo_compat(root, rel) \ @@ -832,6 +832,29 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * ------------- */ +/* See ExecEvalParamExtern() */ +static inline ParamExternData * +CustomEvalParamExternCompat(Param *param, ParamListInfo params) +{ + ParamExternData *prm; + +#if PG_VERSION_NUM >= 110000 + ParamExternData prmdata; + + if (params->paramFetch != NULL) + prm = params->paramFetch(params, param->paramid, false, &prmdata); + else + prm = ¶ms->params[param->paramid - 1]; +#else + prm = ¶ms->params[param->paramid - 1]; + + if (!OidIsValid(prm->ptype) && params->paramFetch != NULL) + params->paramFetch(params, param->paramid); +#endif + + return prm; +} + void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 69cdb8c8..ef091e0b 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -29,7 +29,6 @@ #define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" -#define ERR_PART_ATTR_MULTIPLE_RESULTS #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" #define ERR_PART_DESC_CONVERT "could not convert row type for partition" diff --git a/src/init.c b/src/init.c index 327ca027..a25d5956 100644 --- a/src/init.c +++ b/src/init.c @@ -36,6 +36,7 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/typcache.h" + #if PG_VERSION_NUM < 110000 #include "catalog/pg_inherits_fn.h" #endif diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 22e6b83f..b90619e0 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -23,9 +23,6 @@ #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" -#if PG_VERSION_NUM < 110000 -#include 
"catalog/pg_inherits_fn.h" -#endif #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" @@ -41,6 +38,10 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif + /* Function declarations */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 89e8536d..f8f52e9d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -18,9 +18,6 @@ #include "access/xact.h" #include "catalog/heap.h" #include "catalog/namespace.h" -#if PG_VERSION_NUM < 110000 -#include "catalog/pg_inherits_fn.h" -#endif #include "catalog/pg_type.h" #include "commands/tablecmds.h" #include "executor/spi.h" @@ -35,6 +32,10 @@ #include "utils/syscache.h" #include "utils/snapmgr.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif + #if PG_VERSION_NUM >= 100000 #include "utils/regproc.h" #include "utils/varlena.h" diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 233b8773..9d3ffb15 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -851,20 +851,7 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) param->paramid > 0 && param->paramid <= params->numParams) { - ParamExternData *prm; - -#if PG_VERSION_NUM >= 110000 - ParamExternData prmdata; - if (params->paramFetch != NULL) - prm = params->paramFetch(params, param->paramid, false, &prmdata); - else - prm = ¶ms->params[param->paramid - 1]; -#else - prm = ¶ms->params[param->paramid - 1]; - if (!OidIsValid(prm->ptype) && params->paramFetch != NULL) - (*params->paramFetch) (params, param->paramid); -#endif - + ParamExternData *prm = CustomEvalParamExternCompat(param, params); if (OidIsValid(prm->ptype)) { From 087abe75abd95a6afb5366ff212e5845f61dbd6d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 13 Jul 2018 15:09:55 +0300 Subject: [PATCH 304/528] resolve fixme regarding flushing all caches --- src/hooks.c | 8 +++--- src/include/relation_info.h | 16 +++++++++-- src/relation_info.c | 57 ++++++++++++++++++++++++++++++------- 3 files changed, 64 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 28e52f47..cbed54f4 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -834,9 +834,9 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Invalidation event for whole cache */ if (relid == InvalidOid) { - invalidate_pathman_status_info_cache(); - - /* FIXME: reset other caches as well */ + invalidate_bounds_cache(); + invalidate_parents_cache(); + invalidate_status_cache(); } /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ @@ -855,7 +855,7 @@ pathman_relcache_hook(Datum arg, Oid relid) forget_parent_of_partition(relid); /* Invalidate PartStatusInfo entry if needed */ - invalidate_pathman_status_info(relid); + forget_status_of_relation(relid); } } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 2f37406c..d2a3d053 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -184,6 +184,16 @@ typedef struct PartBoundInfo uint32 part_idx; } PartBoundInfo; +static inline void +FreePartBoundInfo(PartBoundInfo *pbin) +{ + if (pbin->parttype == PT_RANGE) + { + FreeBound(&pbin->range_min, pbin->byval); + FreeBound(&pbin->range_max, pbin->byval); + } +} + /* * PartRelationInfo * Per-relation partitioning information. 
@@ -341,8 +351,8 @@ PartTypeToCString(PartType parttype)
 
 /* Status cache */
-void invalidate_pathman_status_info(Oid relid);
-void invalidate_pathman_status_info_cache(void);
+void forget_status_of_relation(Oid relid);
+void invalidate_status_cache(void);
 
 /* Dispatch cache */
 bool has_pathman_relation_info(Oid relid);
@@ -359,11 +369,13 @@ void shout_if_prel_is_invalid(const Oid parent_oid,
 /* Bounds cache */
 void forget_bounds_of_partition(Oid partition);
 PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel);
+void invalidate_bounds_cache(void);
 
 /* Parents cache */
 void cache_parent_of_partition(Oid partition, Oid parent);
 void forget_parent_of_partition(Oid partition);
 Oid get_parent_of_partition(Oid partition);
+void invalidate_parents_cache(void);
 
 /* Partitioning expression routines */
 Node *parse_partitioning_expression(const Oid relid,
diff --git a/src/relation_info.c b/src/relation_info.c
index 1f8a1ba1..a18ceeec 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -187,7 +187,7 @@ init_relation_info_static_data(void)
 
 /* Invalidate PartStatusInfo for 'relid' */
 void
-invalidate_pathman_status_info(Oid relid)
+forget_status_of_relation(Oid relid)
 {
 	PartStatusInfo *psin;
 	PartParentInfo *ppar;
@@ -225,7 +225,7 @@
 
 /* Invalidate all PartStatusInfo entries */
 void
-invalidate_pathman_status_info_cache(void)
+invalidate_status_cache(void)
 {
 	invalidate_psin_entries_using_relid(InvalidOid);
 }
@@ -241,14 +241,14 @@ invalidate_psin_entries_using_relid(Oid relid)
 
 	while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL)
 	{
-		if (relid == InvalidOid ||
+		if (!OidIsValid(relid) ||
 			psin->relid == relid ||
 			(psin->prel && PrelHasPartition(psin->prel, relid)))
 		{
 			/* Perform invalidation */
 			invalidate_psin_entry(psin);
 
-			/* Exit if found */
+			/* Exit if exact match */
 			if (OidIsValid(relid))
 			{
 				hash_seq_term(&status);
@@ -952,15 +952,10 @@ forget_bounds_of_partition(Oid partition)
 											NULL) :
 						NULL; /* don't even bother */
 
-	/* Free this entry */
 	if (pbin)
 	{
-		/* Call pfree() if it's RANGE bounds */
-		if (pbin->parttype == PT_RANGE)
-		{
-			FreeBound(&pbin->range_min, pbin->byval);
-			FreeBound(&pbin->range_max, pbin->byval);
-		}
+		/* Free this entry */
+		FreePartBoundInfo(pbin);
 
 		/* Finally remove this entry from cache */
 		pathman_cache_search_relid(bounds_cache,
@@ -1027,6 +1022,26 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel)
 	return pbin;
 }
 
+void
+invalidate_bounds_cache(void)
+{
+	HASH_SEQ_STATUS	status;
+	PartBoundInfo  *pbin;
+
+	Assert(offsetof(PartBoundInfo, child_relid) == 0);
+
+	hash_seq_init(&status, bounds_cache);
+
+	while ((pbin = hash_seq_search(&status)) != NULL)
+	{
+		FreePartBoundInfo(pbin);
+
+		pathman_cache_search_relid(bounds_cache,
+								   pbin->child_relid,
+								   HASH_REMOVE, NULL);
+	}
+}
+
 /*
  * Get constraint expression tree of a partition.
 *
@@ -1258,6 +1273,26 @@ get_parent_of_partition(Oid partition)
 	}
 }
 
+void
+invalidate_parents_cache(void)
+{
+	HASH_SEQ_STATUS	status;
+	PartParentInfo *ppar;
+
+	Assert(offsetof(PartParentInfo, child_relid) == 0);
+
+	hash_seq_init(&status, parents_cache);
+
+	while ((ppar = hash_seq_search(&status)) != NULL)
+	{
+		/* This is a plain structure, no need to pfree() */
+
+		pathman_cache_search_relid(parents_cache,
+								   ppar->child_relid,
+								   HASH_REMOVE, NULL);
+	}
+}
+
 /*
 * Partitioning expression routines.
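Patch 304's user-visible guarantee is easiest to state in SQL: after DDL on a partition, no stale per-partition metadata may be served from the bounds/parents/status caches that the new `invalidate_*_cache()` routines flush. A minimal sketch, assuming only the functions and the `pathman_partition_list` view documented in README.md (the table name `tbl` is hypothetical):

```plpgsql
/* Sketch: cached bounds must track DDL, never serving stale entries */
CREATE TABLE tbl(id INT4 NOT NULL);
SELECT create_range_partitions('tbl', 'id', 1, 100, 4);  /* fills the bounds cache */
SELECT partition, range_min, range_max
FROM pathman_partition_list WHERE parent = 'tbl'::REGCLASS;   /* 4 rows */
SELECT drop_range_partition('tbl_4');                    /* invalidation fires */
SELECT partition, range_min, range_max
FROM pathman_partition_list WHERE parent = 'tbl'::REGCLASS;   /* 3 rows, no stale 'tbl_4' */
```

Flushing all three caches on a relid-less invalidation event is the conservative choice: it trades a one-time cache rebuild for the certainty that no entry survives an event whose target relation is unknown.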
From a69b12264469541b3e6c90ce4fead76460687e68 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Thu, 19 Jul 2018 07:37:06 +0300
Subject: [PATCH 305/528] Fix shardman's COPY FROM as it got a bit rotten in the 'next' branch

---
 src/utility_stmt_hooking.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c
index 553f7c8e..fcd6a1dc 100644
--- a/src/utility_stmt_hooking.c
+++ b/src/utility_stmt_hooking.c
@@ -630,9 +630,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel,
 		/* Handle foreign tables */
 		else
 		{
-			child_result_rel->ri_FdwRoutine->ForeignNextCopyFrom(estate,
-																 child_rri,
-																 cstate);
+			child_rri->ri_FdwRoutine->ForeignNextCopyFrom(estate,
+														  child_rri,
+														  cstate);
 		}
 #endif
 
@@ -706,7 +706,7 @@ prepare_rri_for_copy(ResultRelInfoHolder *rri_holder,
 		FdwCopyFromIsSupported(fdw_routine))
 	{
 		CopyState		cstate = (CopyState) rps_storage->init_rri_holder_cb_arg;
-		ResultRelInfo  *parent_rri = rps_storage->saved_rel_info;
+		ResultRelInfo  *parent_rri = rps_storage->base_rri;
 		EState		   *estate = rps_storage->estate;
 
 		fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_rri);

From f8d5a5a45539b1551c1e5a8a29196bb266d17a46 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 23 Jul 2018 13:58:40 +0300
Subject: [PATCH 306/528] Postgres Pro Enterprise compatibility fixes by Victor Wagner (@vbwagner)

---
 src/include/compat/pg_compat.h | 12 ++++++++++++
 src/init.c                     |  2 +-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h
index 8632578e..4228d264 100644
--- a/src/include/compat/pg_compat.h
+++ b/src/include/compat/pg_compat.h
@@ -626,6 +626,18 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc,
 #endif
 
 
+/*
+ * HeapTupleGetXmin()
+ * Vanilla PostgreSQL has HeapTupleHeaderGetXmin, but for 64-bit xid
+ * we need access to the entire tuple, not just its header.
+ */
+#ifdef XID_IS_64BIT
+# define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup)
+#else
+# define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data)
+#endif
+
+
 /*
 * -------------
 *  Common code
diff --git a/src/init.c b/src/init.c
index 569a4c2f..3435fdc8 100644
--- a/src/init.c
+++ b/src/init.c
@@ -675,7 +675,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull,
 
 	/* Set xmin if necessary */
 	if (xmin)
-		*xmin = HeapTupleHeaderGetXmin(htup->t_data);
+		*xmin = HeapTupleGetXminCompat(htup);
 
 	/* Set ItemPointer if necessary */
 	if (iptr)

From 91005d048f07c87a850da82af3a6df561fea3bb2 Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Mon, 23 Jul 2018 14:07:44 +0300
Subject: [PATCH 307/528] fix python tests

---
 tests/python/partitioning_test.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py
index 2c290f8d..6a435c89 100755
--- a/tests/python/partitioning_test.py
+++ b/tests/python/partitioning_test.py
@@ -19,7 +19,6 @@
 
 from distutils.version import LooseVersion
 from testgres import get_new_node, get_pg_version
-from testgres.utils import pg_version_ge
 
 # set setup base logging config, it can be turned on by `use_logging`
 # parameter on node setup

From b23712b640673dc81d7940bc01900ae832979e4c Mon Sep 17 00:00:00 2001
From: Dmitry Ivanov
Date: Wed, 25 Jul 2018 17:19:25 +0300
Subject: [PATCH 308/528] refresh README.md

---
 README.md | 77 +++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 49 insertions(+), 28 deletions(-)

diff --git a/README.md b/README.md
index c89d99de..1b4dcf1b 100644
--- a/README.md
+++ b/README.md
@@ -63,13 +63,13 @@ More interesting features are yet to come. Stay tuned!
  * Effective query planning for partitioned tables (JOINs, subselects etc);
  * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime;
  * `PartitionFilter`: an efficient drop-in replacement for INSERT triggers;
+ * `PartitionRouter` for cross-partition UPDATE queries (instead of triggers);
  * Automatic partition creation for new INSERTed data (only for RANGE partitioning);
  * Improved `COPY FROM` statement that is able to insert rows directly into partitions;
- * UPDATE triggers generation out of the box (will be replaced with custom nodes too);
  * User-defined callbacks for partition creation event handling;
  * Non-blocking concurrent table partitioning;
  * FDW support (foreign partitions);
- * Various GUC toggles and configurable settings.
+ * Various [GUC](#disabling-pg_pathman) toggles and configurable settings.
 
 ## Installation guide
 To install `pg_pathman`, execute this in the module's directory:
@@ -121,8 +121,8 @@ Although it's possible to get major and minor version numbers using `\dx pg_path
 
 ### Partition creation
 ```plpgsql
-create_hash_partitions(relation REGCLASS,
-                       expr TEXT,
+create_hash_partitions(parent_relid REGCLASS,
+                       expression TEXT,
                        partitions_count INTEGER,
                        partition_data BOOLEAN DEFAULT TRUE,
                        partition_names TEXT[] DEFAULT NULL,
                        tablespaces TEXT[] DEFAULT NULL)
```
Performs HASH partitioning for `parent_relid` by partitioning expression `expression`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may take a while to finish and the table will be locked until the transaction commits.
See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). ```plpgsql -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL partition_data BOOLEAN DEFAULT TRUE) -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, p_interval INTERVAL, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, bounds ANYARRAY, partition_names TEXT[] DEFAULT NULL, @@ -181,10 +181,12 @@ stop_concurrent_part_task(relation REGCLASS) Stops a background worker performing a concurrent partitioning task. Note: worker will exit after it finishes relocating a current batch. ### Triggers -```plpgsql -create_update_triggers(parent REGCLASS) -``` -Creates a for-each-row trigger to enable cross-partition UPDATE on a table partitioned by HASH/RANGE. The trigger is not created automatically because of the overhead caused by its function. You don't have to use this feature unless partitioning key might change during an UPDATE. + +Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*. + +Each inserted row results in execution of BEFORE/AFTER INSERT trigger functions of a **corresponding partition**. +Each updated row results in execution of BEFORE/AFTER UPDATE trigger functions of a **corresponding partition**. +Each moved row (cross-partition update) results in execution of BEFORE UPDATE + BEFORE/AFTER DELETE + BEFORE/AFTER INSERT trigger functions of **corresponding partitions**. ### Post-creation partition management ```plpgsql @@ -196,9 +198,10 @@ Replaces specified partition of HASH-partitioned table with another table. The ` ```plpgsql -split_range_partition(partition REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) +split_range_partition(partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) ``` Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available. @@ -208,21 +211,21 @@ merge_range_partitions(variadic partitions REGCLASS[]) Merge several adjacent RANGE partitions. Partitions are automatically ordered by increasing bounds; all the data will be accumulated in the first partition. ```plpgsql -append_range_partition(parent REGCLASS, +append_range_partition(parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) ``` Append new RANGE partition with `pathman_config.range_interval` as interval. ```plpgsql -prepend_range_partition(parent REGCLASS, +prepend_range_partition(parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) ``` Prepend new RANGE partition with `pathman_config.range_interval` as interval. ```plpgsql -add_range_partition(relation REGCLASS, +add_range_partition(parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT DEFAULT NULL, @@ -236,26 +239,26 @@ drop_range_partition(partition TEXT, delete_data BOOLEAN DEFAULT TRUE) Drop RANGE partition and all of its data if `delete_data` is true. 
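The RANGE-management functions above compose naturally. A brief illustrative session, using only the signatures documented above (the `journal` table and its partition names are hypothetical, not part of this README):

```plpgsql
SELECT append_range_partition('journal');             /* new partition after the last one */
SELECT prepend_range_partition('journal');            /* new partition before the first one */
SELECT split_range_partition('journal_3', 250);       /* split 'journal_3' in two at value 250 */
SELECT merge_range_partitions('journal_1', 'journal_2');  /* rows accumulate in the first partition */
```
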
```plpgsql -attach_range_partition(relation REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) +attach_range_partition(parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) ``` Attach partition to the existing RANGE-partitioned relation. The attached table must have exactly the same structure as the parent table, including the dropped columns. Partition creation callback is invoked if set (see `pathman_config_params`). ```plpgsql -detach_range_partition(partition REGCLASS) +detach_range_partition(partition_relid REGCLASS) ``` Detach partition from the existing RANGE-partitioned relation. ```plpgsql -disable_pathman_for(relation TEXT) +disable_pathman_for(parent_relid REGCLASS) ``` Permanently disable `pg_pathman` partitioning mechanism for the specified parent table and remove the insert trigger if it exists. All partitions and data remain unchanged. ```plpgsql -drop_partitions(parent REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) +drop_partitions(parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) ``` Drop partitions of the `parent` table (both foreign and local relations). If `delete_data` is `false`, the data is copied to the parent table first. Default is `false`. @@ -347,7 +350,7 @@ CREATE TABLE IF NOT EXISTS pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT TRUE, auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0, + init_callback TEXT DEFAULT NULL, spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE); ``` This table stores optional parameters which override standard behavior. @@ -414,6 +417,7 @@ Shows memory consumption of various caches. - `RuntimeAppend` (overrides `Append` plan node) - `RuntimeMergeAppend` (overrides `MergeAppend` plan node) - `PartitionFilter` (drop-in replacement for INSERT triggers) +- `PartitionRouter` (implements cross-partition UPDATEs) `PartitionFilter` acts as a *proxy node* for INSERT's child scan, which means it can redirect output tuples to the corresponding partition: @@ -430,6 +434,22 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` +`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. when you update any column of a partitioning key). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. + +```plpgsql +EXPLAIN (COSTS OFF) +UPDATE partitioned_table +SET value = value + 1 WHERE value = 2; + QUERY PLAN +--------------------------------------------------- + Update on partitioned_table_0 + -> Custom Scan (PartitionRouter) + -> Custom Scan (PartitionFilter) + -> Seq Scan on partitioned_table_0 + Filter: (value = 2) +(5 rows) +``` + `RuntimeAppend` and `RuntimeMergeAppend` have much in common: they come in handy in a case when WHERE condition takes form of: ``` VARIABLE OP PARAM @@ -580,7 +600,7 @@ NOTICE: 100 rows copied from part_test_2 (3 rows) ``` -- You can turn foreign tables into partitions using the `attach_range_partition()` function. Rows that were meant to be inserted into parent will be redirected to foreign partitions (as usual, PartitionFilter will be involved), though by default it is prohibited to insert rows into partitions provided not by `postgres_fdw`. 
Only superuser is allowed to set `pg_pathman.insert_into_fdw` GUC variable. +- You can turn foreign tables into partitions using the `attach_range_partition()` function. Rows that were meant to be inserted into parent will be redirected to foreign partitions (as usual, PartitionFilter will be involved), though by default it is prohibited to insert rows into partitions provided not by `postgres_fdw`. Only superuser is allowed to set `pg_pathman.insert_into_fdw` [GUC](#disabling-pg_pathman) variable. ### HASH partitioning Consider an example of HASH partitioning. First create a table with some integer column: @@ -710,7 +730,8 @@ There are several user-accessible [GUC](https://www.postgresql.org/docs/9.5/stat - `pg_pathman.enable` --- disable (or enable) `pg_pathman` **completely** - `pg_pathman.enable_runtimeappend` --- toggle `RuntimeAppend` custom node on\off - `pg_pathman.enable_runtimemergeappend` --- toggle `RuntimeMergeAppend` custom node on\off - - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off + - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off (for INSERTs) + - `pg_pathman.enable_partitionrouter` --- toggle `PartitionRouter` custom node on\off (for cross-partition UPDATEs) - `pg_pathman.enable_auto_partition` --- toggle automatic partition creation on\off (per session) - `pg_pathman.enable_bounds_cache` --- toggle bounds cache on\off (faster updates of partitioning scheme) - `pg_pathman.insert_into_fdw` --- allow INSERTs into various FDWs `(disabled | postgres | any_fdw)` From 784170a7edbdfec4323a48ca8f7d24ea0cc68429 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 25 Jul 2018 17:23:21 +0300 Subject: [PATCH 309/528] more fixes to README.md --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 1b4dcf1b..2a2796d7 100644 --- a/README.md +++ b/README.md @@ -58,16 +58,16 @@ More interesting features are yet to come. Stay tuned! * HASH and RANGE partitioning schemes; * Partitioning by expression and composite key; - * Both automatic and manual partition management; + * Both automatic and manual [partition management](#post-creation-partition-management); * Support for integer, floating point, date and other types, including domains; * Effective query planning for partitioned tables (JOINs, subselects etc); * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; - * `PartitionFilter`: an efficient drop-in replacement for INSERT triggers; - * `PartitionRouter` for cross-partition UPDATE queries (instead of triggers); + * [`PartitionFilter`](#custom-plan-nodes): an efficient drop-in replacement for INSERT triggers; + * [`PartitionRouter`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); * Automatic partition creation for new INSERTed data (only for RANGE partitioning); * Improved `COPY FROM` statement that is able to insert rows directly into partitions; - * User-defined callbacks for partition creation event handling; - * Non-blocking concurrent table partitioning; + * [User-defined callbacks](#additional-parameters) for partition creation event handling; + * Non-blocking [concurrent table partitioning](#data-migration); * FDW support (foreign partitions); * Various [GUC](#disabling-pg_pathman) toggles and configurable settings. @@ -182,17 +182,17 @@ Stops a background worker performing a concurrent partitioning task. 
Note: worke ### Triggers -Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*. +Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*: -Each inserted row results in execution of BEFORE/AFTER INSERT trigger functions of a **corresponding partition**. -Each updated row results in execution of BEFORE/AFTER UPDATE trigger functions of a **corresponding partition**. -Each moved row (cross-partition update) results in execution of BEFORE UPDATE + BEFORE/AFTER DELETE + BEFORE/AFTER INSERT trigger functions of **corresponding partitions**. +* Each **inserted row** results in execution of `BEFORE/AFTER INSERT` trigger functions of a *corresponding partition*. +* Each **updated row** results in execution of `BEFORE/AFTER UPDATE` trigger functions of a *corresponding partition*. +* Each **moved row** (cross-partition update) results in execution of `BEFORE UPDATE` + `BEFORE/AFTER DELETE` + `BEFORE/AFTER INSERT` trigger functions of *corresponding partitions*. ### Post-creation partition management ```plpgsql replace_hash_partition(old_partition REGCLASS, new_partition REGCLASS, - lock_parent BOOL DEFAULT TRUE) + lock_parent BOOLEAN DEFAULT TRUE) ``` Replaces specified partition of HASH-partitioned table with another table. The `lock_parent` parameter will prevent any INSERT/UPDATE/ALTER TABLE queries to parent table. @@ -201,7 +201,7 @@ Replaces specified partition of HASH-partitioned table with another table. The ` split_range_partition(partition_relid REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) + tablespace TEXT DEFAULT NULL) ``` Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available. @@ -434,7 +434,7 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` -`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. when you update any column of a partitioning key). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. +`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. when update of partitioning key requires that we move row to another partition). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. 
```plpgsql EXPLAIN (COSTS OFF) From d1255a5394d2bcebc328fdcda4a24f329d0f162a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 22 Aug 2018 17:47:40 +0300 Subject: [PATCH 310/528] WIP various changes due to EPQ --- src/include/partition_router.h | 3 ++ src/partition_filter.c | 60 ++++++++++-------------- src/partition_router.c | 82 ++++++++++++++++++++------------- src/planner_tree_modification.c | 12 ++--- 4 files changed, 83 insertions(+), 74 deletions(-) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index e90893ba..e21940bb 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -32,6 +32,8 @@ typedef struct PartitionRouterState Oid partitioned_table; JunkFilter *junkfilter; + EPQState epqstate; + int epqparam; Plan *subplan; /* proxy variable to store subplan */ } PartitionRouterState; @@ -64,6 +66,7 @@ void init_partition_router_static_data(void); Plan *make_partition_router(Plan *subplan, Oid parent_relid, Index parent_rti, + int epq_param, List *returning_list); diff --git a/src/partition_filter.c b/src/partition_filter.c index f96eb970..f2d06848 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -87,7 +87,7 @@ static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); -static List * pfilter_build_tlist(Relation parent_rel, Plan *subplan); +static List *pfilter_build_tlist(Plan *subplan); static void pf_memcxt_callback(void *arg); static estate_mod_data * fetch_estate_mod_data(EState *estate); @@ -637,7 +637,6 @@ make_partition_filter(Plan *subplan, CmdType command_type) { CustomScan *cscan = makeNode(CustomScan); - Relation parent_rel; /* Currently we don't support ON CONFLICT clauses */ if (conflict_action != ONCONFLICT_NONE) @@ -655,14 +654,12 @@ make_partition_filter(Plan *subplan, cscan->methods = &partition_filter_plan_methods; cscan->custom_plans = list_make1(subplan); - /* Build an appropriate target list using a cached Relation entry */ - parent_rel = RelationIdGetRelation(parent_relid); - cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan); - RelationClose(parent_rel); - /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); + /* Prepare 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); @@ -830,44 +827,37 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e * Build partition filter's target list pointing to subplan tuple's elements. */ static List * -pfilter_build_tlist(Relation parent_rel, Plan *subplan) +pfilter_build_tlist(Plan *subplan) { List *result_tlist = NIL; ListCell *lc; foreach (lc, subplan->targetlist) { - TargetEntry *tle = (TargetEntry *) lfirst(lc), - *newtle = NULL; + TargetEntry *tle = (TargetEntry *) lfirst(lc), + *newtle = NULL; if (IsA(tle->expr, Const)) - newtle = makeTargetEntry(copyObject(tle->expr), tle->resno, tle->resname, - tle->resjunk); - + { + /* TODO: maybe we should use copyObject(tle->expr)? 
*/ + newtle = makeTargetEntry(tle->expr, + tle->resno, + tle->resname, + tle->resjunk); + } else { - if (tle->expr != NULL && IsA(tle->expr, Var)) - { - Var *var = (Var *) palloc(sizeof(Var)); - *var = *((Var *)(tle->expr)); - var->varno = INDEX_VAR; - var->varattno = tle->resno; - - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); - } - else - { - Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ - tle->resno, - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); - } + Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, + tle->resno, + tle->resname, + tle->resjunk); } result_tlist = lappend(result_tlist, newtle); diff --git a/src/partition_router.c b/src/partition_router.c index a354fd87..94dbae05 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -28,9 +28,9 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, - EPQState *epqstate, - EState *estate); +static bool ExecDeleteInternal(ItemPointer tupleid, + EPQState *epqstate, + EState *estate); void init_partition_router_static_data(void) @@ -65,6 +65,7 @@ Plan * make_partition_router(Plan *subplan, Oid parent_relid, Index parent_rti, + int epq_param, List *returning_list) { @@ -85,16 +86,17 @@ make_partition_router(Plan *subplan, cscan->scan.plan.plan_rows = subplan->plan_rows; cscan->scan.plan.plan_width = subplan->plan_width; - /* Setup methods and child plan */ + /* Setup methods, child plan and param number for EPQ */ cscan->methods = &partition_router_plan_methods; cscan->custom_plans = list_make1(pfilter); - - /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter->targetlist; + cscan->custom_private = list_make1(makeInteger(epq_param)); /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter->targetlist; + /* FIXME: should we use the same tlist? 
*/ cscan->custom_scan_tlist = subplan->targetlist; @@ -113,7 +115,9 @@ partition_router_create_scan_state(CustomScan *node) state->css.methods = &partition_router_exec_methods; /* Extract necessary variables */ + state->epqparam = intVal(linitial(node->custom_private)); state->subplan = (Plan *) linitial(node->custom_plans); + return (Node *) state; } @@ -122,6 +126,10 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) { PartitionRouterState *state = (PartitionRouterState *) node; + EvalPlanQualInit(&state->epqstate, estate, + state->subplan, NIL, + state->epqparam); + /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); } @@ -134,6 +142,7 @@ partition_router_exec(CustomScanState *node) TupleTableSlot *slot; PartitionRouterState *state = (PartitionRouterState *) node; +take_next_tuple: /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); @@ -141,7 +150,6 @@ partition_router_exec(CustomScanState *node) { ResultRelInfo *new_rri, /* new tuple owner */ *old_rri; /* previous tuple owner */ - EPQState epqstate; PartitionFilterState *child_state; char relkind; ItemPointerData ctid; @@ -203,8 +211,12 @@ partition_router_exec(CustomScanState *node) /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); - ExecDeleteInternal(&ctid, &epqstate, estate); + EvalPlanQualSetSlot(&state->epqstate, child_state->subplan_slot); + if (!ExecDeleteInternal(&ctid, &state->epqstate, estate)) + { + elog(INFO, "oops, deleted, taking next tuple!"); + goto take_next_tuple; + } /* Magic: replace parent's ResultRelInfo with child's one (INSERT) */ estate->es_result_relation_info = new_rri; @@ -244,40 +256,42 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e * ---------------------------------------------------------------- */ -static TupleTableSlot * +static bool ExecDeleteInternal(ItemPointer tupleid, EPQState *epqstate, EState *estate) { - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; + ResultRelInfo *rri; + Relation rel; HTSU_Result result; HeapUpdateFailureData hufd; /* * get information on the (current) result relation */ - resultRelInfo = estate->es_result_relation_info; - resultRelationDesc = resultRelInfo->ri_RelationDesc; + rri = estate->es_result_relation_info; + rel = rri->ri_RelationDesc; - /* BEFORE ROW DELETE Triggers */ - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_delete_before_row) + /* BEFORE ROW UPDATE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_update_before_row) { - bool dodelete; - - dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, - tupleid, NULL); + elog(INFO, "kek!"); + } - if (!dodelete) - elog(ERROR, "the old row always should be deleted from child table"); + /* BEFORE ROW DELETE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_delete_before_row) + { + if (!ExecBRDeleteTriggers(estate, epqstate, rri, tupleid, NULL)) + return false; } if (tupleid != NULL) { /* delete the tuple */ ldelete: - result = heap_delete_compat(resultRelationDesc, tupleid, + result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, true /* wait for commit */ , @@ -292,7 +306,7 @@ ExecDeleteInternal(ItemPointer tupleid, errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); /* Else, already deleted by self; 
nothing to do */ - return NULL; + return false; case HeapTupleMayBeUpdated: break; @@ -302,17 +316,19 @@ ExecDeleteInternal(ItemPointer tupleid, ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) { TupleTableSlot *epqslot; epqslot = EvalPlanQual(estate, epqstate, - resultRelationDesc, - resultRelInfo->ri_RangeTableIndex, + rel, + rri->ri_RangeTableIndex, LockTupleExclusive, &hufd.ctid, hufd.xmax); + if (!TupIsNull(epqslot)) { Assert(tupleid != NULL); @@ -320,19 +336,19 @@ ExecDeleteInternal(ItemPointer tupleid, goto ldelete; } } + /* tuple already deleted; nothing to do */ - return NULL; + return false; default: elog(ERROR, "unrecognized heap_delete status: %u", result); - return NULL; } } else elog(ERROR, "tupleid should be specified for deletion"); - /* AFTER ROW DELETE Triggers */ - ExecARDeleteTriggersCompat(estate, resultRelInfo, tupleid, NULL, NULL); + /* AFTER ROW DELETE triggers */ + ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); - return NULL; + return true; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9d3ffb15..22af4a73 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -666,15 +666,14 @@ partition_router_visitor(Plan *plan, void *context) if (modifytable_contains_fdw(rtable, modify_table)) { - ereport(NOTICE, - (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), - errmsg("discovered mix of local and foreign tables, " - UPDATE_NODE_NAME " will be disabled"))); - return; + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg(UPDATE_NODE_NAME " does not support foreign data wrappers"))); } lc3 = list_head(modify_table->returningLists); - forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable), @@ -698,6 +697,7 @@ partition_router_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_router((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, + modify_table->epqParam, returning_list); } } From c0e3513e7ba99d2a4d449a47ebe6bd91a61a30e9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 24 Aug 2018 18:03:23 +0300 Subject: [PATCH 311/528] WIP more fixes in EPQ handling --- src/hooks.c | 5 +- src/include/partition_filter.h | 9 ++-- src/include/partition_router.h | 7 ++- src/partition_filter.c | 30 ++++------- src/partition_router.c | 93 +++++++++++++++------------------ src/planner_tree_modification.c | 27 +++++++--- 6 files changed, 83 insertions(+), 88 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index cbed54f4..25a2ec5c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -987,8 +987,9 @@ pathman_executor_hook(QueryDesc *queryDesc, { CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; - /* Check if this is a PartitionRouter node */ - if (IsPartitionRouterState(pr_state)) + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pr_state) && + IsPartitionRouterState(linitial(pr_state->custom_ps))) { ResultRelInfo *rri = &mt_state->resultRelInfo[i]; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index ef091e0b..b3ecffeb 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -108,9 +108,6 @@ typedef struct ResultPartsStorage result_parts; /* partition ResultRelInfo cache */ CmdType 
command_type; - bool warning_triggered; /* warning message counter */ - - TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ } PartitionFilterState; @@ -170,6 +167,8 @@ PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storag TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); +List * pfilter_build_tlist(Plan *subplan); + /* Find suitable partition using 'value' */ Oid * find_partitions_for_value(Datum value, Oid value_type, @@ -183,8 +182,8 @@ Plan * make_partition_filter(Plan *subplan, Oid parent_relid, Index parent_rti, OnConflictAction conflict_action, - List *returning_list, - CmdType command_type); + CmdType command_type, + List *returning_list); Node * partition_filter_create_scan_state(CustomScan *node); diff --git a/src/include/partition_router.h b/src/include/partition_router.h index e21940bb..a0ebf3dd 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -31,10 +31,13 @@ typedef struct PartitionRouterState CustomScanState css; Oid partitioned_table; - JunkFilter *junkfilter; + Plan *subplan; /* proxy variable to store subplan */ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ + EPQState epqstate; int epqparam; - Plan *subplan; /* proxy variable to store subplan */ + + ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index f2d06848..57f153c2 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -16,6 +16,7 @@ #include "partition_filter.h" #include "utils.h" +#include "access/htup_details.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" #include "foreign/fdwapi.h" @@ -87,8 +88,6 @@ static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); -static List *pfilter_build_tlist(Plan *subplan); - static void pf_memcxt_callback(void *arg); static estate_mod_data * fetch_estate_mod_data(EState *estate); @@ -633,8 +632,8 @@ make_partition_filter(Plan *subplan, Oid parent_relid, Index parent_rti, OnConflictAction conflict_action, - List *returning_list, - CmdType command_type) + CmdType command_type, + List *returning_list) { CustomScan *cscan = makeNode(CustomScan); @@ -723,9 +722,6 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) state->on_conflict_action != ONCONFLICT_NONE, RPS_RRI_CB(prepare_rri_for_insert, state), RPS_RRI_CB(NULL, NULL)); - - /* No warnings yet */ - state->warning_triggered = false; } TupleTableSlot * @@ -739,16 +735,12 @@ partition_filter_exec(CustomScanState *node) TupleTableSlot *slot; slot = ExecProcNode(child_ps); - state->subplan_slot = slot; - - if (state->tup_convert_slot) - ExecClearTuple(state->tup_convert_slot); if (!TupIsNull(slot)) { MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; - ResultRelInfo *resultRelInfo; + ResultRelInfo *rri; /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -760,30 +752,28 @@ partition_filter_exec(CustomScanState *node) MemoryContextSwitchTo(old_mcxt); ResetExprContext(econtext); - resultRelInfo = rri_holder->result_rel_info; + rri = rri_holder->result_rel_info; /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = resultRelInfo; + estate->es_result_relation_info = rri; /* If there's a 
transform map, rebuild the tuple */ if (rri_holder->tuple_map) { HeapTuple htup_old, htup_new; - Relation child_rel = resultRelInfo->ri_RelationDesc; + Relation child_rel = rri->ri_RelationDesc; htup_old = ExecMaterializeSlot(slot); htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); + ExecClearTuple(slot); /* Allocate new slot if needed */ if (!state->tup_convert_slot) state->tup_convert_slot = MakeTupleTableSlotCompat(); ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); - ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); - - /* Now replace the original slot */ - slot = state->tup_convert_slot; + slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); } return slot; @@ -826,7 +816,7 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e /* * Build partition filter's target list pointing to subplan tuple's elements. */ -static List * +List * pfilter_build_tlist(Plan *subplan) { List *result_tlist = NIL; diff --git a/src/partition_router.c b/src/partition_router.c index 94dbae05..22c6435a 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -28,9 +28,10 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static bool ExecDeleteInternal(ItemPointer tupleid, - EPQState *epqstate, - EState *estate); +static TupleTableSlot *ExecDeleteInternal(TupleTableSlot *slot, + ItemPointer tupleid, + EPQState *epqstate, + EState *estate); void init_partition_router_static_data(void) @@ -70,15 +71,6 @@ make_partition_router(Plan *subplan, { CustomScan *cscan = makeNode(CustomScan); - Plan *pfilter; - - /* Create child PartitionFilter node */ - pfilter = make_partition_filter(subplan, - parent_relid, - parent_rti, - ONCONFLICT_NONE, - returning_list, - CMD_UPDATE); /* Copy costs etc */ cscan->scan.plan.startup_cost = subplan->startup_cost; @@ -88,14 +80,14 @@ make_partition_router(Plan *subplan, /* Setup methods, child plan and param number for EPQ */ cscan->methods = &partition_router_plan_methods; - cscan->custom_plans = list_make1(pfilter); + cscan->custom_plans = list_make1(subplan); cscan->custom_private = list_make1(makeInteger(epq_param)); /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter->targetlist; + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); /* FIXME: should we use the same tlist? 
*/ cscan->custom_scan_tlist = subplan->targetlist; @@ -126,6 +118,9 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) { PartitionRouterState *state = (PartitionRouterState *) node; + /* Remember current relation we're going to delete from */ + state->current_rri = estate->es_result_relation_info; + EvalPlanQualInit(&state->epqstate, estate, state->subplan, NIL, state->epqparam); @@ -148,26 +143,18 @@ partition_router_exec(CustomScanState *node) if (!TupIsNull(slot)) { - ResultRelInfo *new_rri, /* new tuple owner */ - *old_rri; /* previous tuple owner */ - PartitionFilterState *child_state; - char relkind; - ItemPointerData ctid; + ResultRelInfo *current_rri = state->current_rri; + char relkind; + ItemPointerData ctid; ItemPointerSetInvalid(&ctid); - child_state = (PartitionFilterState *) child_ps; - Assert(child_state->command_type == CMD_UPDATE); - - old_rri = child_state->result_parts.base_rri; - new_rri = estate->es_result_relation_info; - /* Build new junkfilter if we have to */ if (state->junkfilter == NULL) { state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, - old_rri->ri_RelationDesc->rd_att->tdhasoid, + current_rri->ri_RelationDesc->rd_att->tdhasoid, ExecInitExtraTupleSlotCompat(estate)); state->junkfilter->jf_junkAttNo = @@ -177,13 +164,14 @@ partition_router_exec(CustomScanState *node) elog(ERROR, "could not find junk ctid column"); } - relkind = old_rri->ri_RelationDesc->rd_rel->relkind; + /* Additional checks based on 'relkind' */ + relkind = current_rri->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { Datum ctid_datum; bool ctid_isnull; - ctid_datum = ExecGetJunkAttribute(child_state->subplan_slot, + ctid_datum = ExecGetJunkAttribute(slot, state->junkfilter->jf_junkAttNo, &ctid_isnull); @@ -199,30 +187,26 @@ partition_router_exec(CustomScanState *node) else elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); - /* - * Clean from junk attributes before INSERT, - * but only if slot wasn't transformed in PartitionFilter. 
- */ - if (TupIsNull(child_state->tup_convert_slot)) - slot = ExecFilterJunk(state->junkfilter, slot); + elog(INFO, "deleting (%d, %d) from table: %s", + ItemPointerGetBlockNumber(&ctid), + ItemPointerGetOffsetNumber(&ctid), + get_rel_name(RelationGetRelid(current_rri->ri_RelationDesc))); - /* Magic: replace current ResultRelInfo with parent's one (DELETE) */ - estate->es_result_relation_info = old_rri; + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = current_rri; /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - EvalPlanQualSetSlot(&state->epqstate, child_state->subplan_slot); - if (!ExecDeleteInternal(&ctid, &state->epqstate, estate)) + slot = ExecDeleteInternal(slot, &ctid, &state->epqstate, estate); + + if (TupIsNull(slot)) { elog(INFO, "oops, deleted, taking next tuple!"); goto take_next_tuple; } - /* Magic: replace parent's ResultRelInfo with child's one (INSERT) */ - estate->es_result_relation_info = new_rri; - /* Tuple will be inserted by ModifyTable */ - return slot; + return ExecFilterJunk(state->junkfilter, slot); } return NULL; @@ -231,7 +215,10 @@ partition_router_exec(CustomScanState *node) void partition_router_end(CustomScanState *node) { + PartitionRouterState *state = (PartitionRouterState *) node; + Assert(list_length(node->custom_ps) == 1); + EvalPlanQualEnd(&state->epqstate); ExecEndNode((PlanState *) linitial(node->custom_ps)); } @@ -256,8 +243,9 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e * ---------------------------------------------------------------- */ -static bool -ExecDeleteInternal(ItemPointer tupleid, +static TupleTableSlot * +ExecDeleteInternal(TupleTableSlot *slot, + ItemPointer tupleid, EPQState *epqstate, EState *estate) { @@ -284,13 +272,15 @@ ExecDeleteInternal(ItemPointer tupleid, rri->ri_TrigDesc->trig_delete_before_row) { if (!ExecBRDeleteTriggers(estate, epqstate, rri, tupleid, NULL)) - return false; + return NULL; } if (tupleid != NULL) { - /* delete the tuple */ + EvalPlanQualSetSlot(epqstate, slot); + ldelete: + /* delete the tuple */ result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, @@ -305,8 +295,8 @@ ExecDeleteInternal(ItemPointer tupleid, errmsg("tuple to be updated was already modified by an operation triggered by the current command"), errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); - /* Else, already deleted by self; nothing to do */ - return false; + /* Already deleted by self; nothing to do */ + return NULL; case HeapTupleMayBeUpdated: break; @@ -333,12 +323,13 @@ ExecDeleteInternal(ItemPointer tupleid, { Assert(tupleid != NULL); *tupleid = hufd.ctid; + slot = epqslot; goto ldelete; } } - /* tuple already deleted; nothing to do */ - return false; + /* Tuple already deleted; nothing to do */ + return NULL; default: elog(ERROR, "unrecognized heap_delete status: %u", result); @@ -350,5 +341,5 @@ ExecDeleteInternal(ItemPointer tupleid, /* AFTER ROW DELETE triggers */ ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); - return true; + return slot; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 22af4a73..95706a7e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -618,7 +618,8 @@ partition_filter_visitor(Plan *plan, void *context) Assert(rtable && IsA(rtable, List)); lc3 = list_head(modify_table->returningLists); - forboth (lc1, 
modify_table->plans, lc2, modify_table->resultRelations) + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable); @@ -638,8 +639,8 @@ partition_filter_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, modify_table->onConflictAction, - returning_list, - CMD_INSERT); + modify_table->operation, + returning_list); } } } @@ -686,7 +687,9 @@ partition_router_visitor(Plan *plan, void *context) /* Check that table is partitioned */ if (has_pathman_relation_info(relid)) { - List *returning_list = NIL; + List *returning_list = NIL; + Plan *prouter, + *pfilter; /* Extract returning list if possible */ if (lc3) @@ -695,10 +698,18 @@ partition_router_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - lfirst(lc1) = make_partition_router((Plan *) lfirst(lc1), relid, - modify_table->nominalRelation, - modify_table->epqParam, - returning_list); + prouter = make_partition_router((Plan *) lfirst(lc1), relid, + modify_table->nominalRelation, + modify_table->epqParam, + returning_list); + + pfilter = make_partition_filter((Plan *) prouter, relid, + modify_table->nominalRelation, + ONCONFLICT_NONE, + CMD_UPDATE, + returning_list); + + lfirst(lc1) = pfilter; } } } From c907d6ed150a9d9810394259ebc63c6121f21e3f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Aug 2018 14:26:22 +0300 Subject: [PATCH 312/528] all tests pass --- expected/pathman_update_node.out | 8 ++++---- src/partition_router.c | 15 ++++----------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 125eedd4..e68bb9ae 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -18,8 +18,8 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRouter) - -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) -> Bitmap Index Scan on test_range_2_val_idx @@ -31,8 +31,8 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRouter) - -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) -> Bitmap Index Scan on test_range_2_val_idx diff --git a/src/partition_router.c b/src/partition_router.c index 22c6435a..f16b7564 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -28,7 +28,7 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static TupleTableSlot *ExecDeleteInternal(TupleTableSlot *slot, +static TupleTableSlot *router_delete_tuple(TupleTableSlot *slot, ItemPointer tupleid, EPQState *epqstate, EState *estate); @@ -187,23 +187,16 @@ partition_router_exec(CustomScanState *node) else elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); - elog(INFO, "deleting (%d, %d) from table: %s", - ItemPointerGetBlockNumber(&ctid), - ItemPointerGetOffsetNumber(&ctid), - 
get_rel_name(RelationGetRelid(current_rri->ri_RelationDesc))); - /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = current_rri; /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - slot = ExecDeleteInternal(slot, &ctid, &state->epqstate, estate); + slot = router_delete_tuple(slot, &ctid, &state->epqstate, estate); + /* We require a tuple */ if (TupIsNull(slot)) - { - elog(INFO, "oops, deleted, taking next tuple!"); goto take_next_tuple; - } /* Tuple will be inserted by ModifyTable */ return ExecFilterJunk(state->junkfilter, slot); @@ -244,7 +237,7 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * -ExecDeleteInternal(TupleTableSlot *slot, +router_delete_tuple(TupleTableSlot *slot, ItemPointer tupleid, EPQState *epqstate, EState *estate) From ee0b8272598beb90889bbc3183cd708cfd106b91 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Aug 2018 22:37:45 +0300 Subject: [PATCH 313/528] PartitionRouter: call before row update triggers --- src/partition_router.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index f16b7564..22560109 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -29,9 +29,9 @@ CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; static TupleTableSlot *router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate); + ItemPointer tupleid, + EPQState *epqstate, + EState *estate); void init_partition_router_static_data(void) @@ -232,24 +232,24 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e /* * ---------------------------------------------------------------- * ExecDeleteInternal - * Basicly is a copy of ExecDelete from executor/nodeModifyTable.c + * This is a modified copy of ExecDelete from executor/nodeModifyTable.c * ---------------------------------------------------------------- */ static TupleTableSlot * router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate) + ItemPointer tupleid, + EPQState *epqstate, + EState *estate) { ResultRelInfo *rri; Relation rel; HTSU_Result result; HeapUpdateFailureData hufd; - /* - * get information on the (current) result relation - */ + EvalPlanQualSetSlot(epqstate, slot); + + /* Get information on the (current) result relation */ rri = estate->es_result_relation_info; rel = rri->ri_RelationDesc; @@ -257,7 +257,9 @@ router_delete_tuple(TupleTableSlot *slot, if (rri->ri_TrigDesc && rri->ri_TrigDesc->trig_update_before_row) { - elog(INFO, "kek!"); + slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); + if (TupIsNull(slot)) + return NULL; } /* BEFORE ROW DELETE triggers */ @@ -270,10 +272,8 @@ router_delete_tuple(TupleTableSlot *slot, if (tupleid != NULL) { - EvalPlanQualSetSlot(epqstate, slot); - ldelete: - /* delete the tuple */ + /* Delete the tuple */ result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, @@ -328,8 +328,7 @@ router_delete_tuple(TupleTableSlot *slot, elog(ERROR, "unrecognized heap_delete status: %u", result); } } - else - elog(ERROR, "tupleid should be specified for deletion"); + else elog(ERROR, "tupleid should be specified for deletion"); /* AFTER ROW DELETE triggers */ ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); From 
c7dbc90e95687fdee89af7606c6d01e73d1e4ef7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 13:56:17 +0300 Subject: [PATCH 314/528] fix python tests --- tests/python/partitioning_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 41390d4a..e234f7ff 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1044,12 +1044,12 @@ def test_update_node_plan1(self): ], "Node Type": "Custom Scan", "Parent Relationship": "child", - "Custom Plan Provider": "PartitionFilter" + "Custom Plan Provider": "PartitionRouter" } ], "Node Type": "Custom Scan", "Parent Relationship": "Member", - "Custom Plan Provider": "PartitionRouter" + "Custom Plan Provider": "PartitionFilter" } ''' From 4abee5cbc116f56200f382dc2373d990fa6694aa Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 14:53:31 +0300 Subject: [PATCH 315/528] restore compatibility with PG 11 --- src/include/compat/pg_compat.h | 16 ++++++++++++++++ src/partition_router.c | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index ed152fe3..f2d5ba63 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -685,6 +685,22 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif +/* + * ExecBRDeleteTriggers() + */ +#if PG_VERSION_NUM >= 110000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot)) +#else +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple)) +#endif + + /* * ExecARDeleteTriggers() */ diff --git a/src/partition_router.c b/src/partition_router.c index 22560109..9c0a041e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -266,7 +266,7 @@ router_delete_tuple(TupleTableSlot *slot, if (rri->ri_TrigDesc && rri->ri_TrigDesc->trig_delete_before_row) { - if (!ExecBRDeleteTriggers(estate, epqstate, rri, tupleid, NULL)) + if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) return NULL; } From a26243f614e93886b05692603944e92b4ec86d2c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 18:35:49 +0300 Subject: [PATCH 316/528] remove obsolete tests --- expected/pathman_update_trigger.out | 289 ---------------------------- sql/pathman_update_trigger.sql | 164 ---------------- 2 files changed, 453 deletions(-) delete mode 100644 expected/pathman_update_trigger.out delete mode 100644 sql/pathman_update_trigger.sql diff --git a/expected/pathman_update_trigger.out b/expected/pathman_update_trigger.out deleted file mode 100644 index fdc5438a..00000000 --- a/expected/pathman_update_trigger.out +++ /dev/null @@ -1,289 +0,0 @@ -\set VERBOSITY terse -SET search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; -/* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); - create_range_partitions -------------------------- - 10 -(1 row) - -SELECT 
create_update_triggers('test_update_trigger.test_range'); - create_update_triggers ------------------------- - -(1 row) - -/* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val < 10 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_1 | 5 | 1 - test_update_trigger.test_range_1 | 5 | 10 - test_update_trigger.test_range_1 | 5 | 2 - test_update_trigger.test_range_1 | 5 | 3 - test_update_trigger.test_range_1 | 5 | 4 - test_update_trigger.test_range_1 | 5 | 5 - test_update_trigger.test_range_1 | 5 | 6 - test_update_trigger.test_range_1 | 5 | 7 - test_update_trigger.test_range_1 | 5 | 8 - test_update_trigger.test_range_1 | 5 | 9 -(10 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val > 20 AND val <= 30 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_3 | 21 | 11 - test_update_trigger.test_range_3 | 22 | 12 - test_update_trigger.test_range_3 | 23 | 13 - test_update_trigger.test_range_3 | 24 | 14 - test_update_trigger.test_range_3 | 25 | 15 - test_update_trigger.test_range_3 | 26 | 16 - test_update_trigger.test_range_3 | 27 | 17 - test_update_trigger.test_range_3 | 28 | 18 - test_update_trigger.test_range_3 | 29 | 19 - test_update_trigger.test_range_3 | 30 | 20 - test_update_trigger.test_range_3 | 21 | 21 - test_update_trigger.test_range_3 | 22 | 22 - test_update_trigger.test_range_3 | 23 | 23 - test_update_trigger.test_range_3 | 24 | 24 - test_update_trigger.test_range_3 | 25 | 25 - test_update_trigger.test_range_3 | 26 | 26 - test_update_trigger.test_range_3 | 27 | 27 - test_update_trigger.test_range_3 | 28 | 28 - test_update_trigger.test_range_3 | 29 | 29 - test_update_trigger.test_range_3 | 30 | 30 -(20 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; -/* Check values #3 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 90 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_9 | 90 | 80 - test_update_trigger.test_range_9 | 90 | 90 -(2 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; -/* Check values #4 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = -1 -ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_11 | -1 | 50 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' 
WHERE val = 100; -/* Check values #5 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 100 -ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_10 | 100 | test! -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; -ERROR: cannot spawn a partition -/* Check values #6 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 70 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 70 | 70 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; -/* Check values #7 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 65 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 65 | 65 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', - 101::NUMERIC, 111::NUMERIC); - attach_range_partition ------------------------------------- - test_update_trigger.test_range_inv -(1 row) - -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; -/* Check values #8 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 105 -ORDER BY comment; - tableoid | val | comment -------------------------------------+-----+--------- - test_update_trigger.test_range_inv | 105 | 60 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); - append_range_partition ------------------------------------ - test_update_trigger.test_range_12 -(1 row) - -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; -/* Check values #9 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 115; - tableoid | val ------------------------------------+----- - test_update_trigger.test_range_12 | 115 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); - create_hash_partitions ------------------------- - 3 -(1 row) - -SELECT create_update_triggers('test_update_trigger.test_hash'); - create_update_triggers ------------------------- - -(1 row) - -/* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 1 -ORDER BY comment; - tableoid | val | comment 
----------------------------------+-----+--------- - test_update_trigger.test_hash_2 | 1 | 1 - test_update_trigger.test_hash_2 | 1 | 10 - test_update_trigger.test_hash_2 | 1 | 2 - test_update_trigger.test_hash_2 | 1 | 3 - test_update_trigger.test_hash_2 | 1 | 4 - test_update_trigger.test_hash_2 | 1 | 5 - test_update_trigger.test_hash_2 | 1 | 6 - test_update_trigger.test_hash_2 | 1 | 7 - test_update_trigger.test_hash_2 | 1 | 8 - test_update_trigger.test_hash_2 | 1 | 9 -(10 rows) - -SELECT count(*) FROM test_update_trigger.test_hash; - count -------- - 10 -(1 row) - -/* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 3 -ORDER BY comment; - tableoid | val | comment -----------+-----+--------- -(0 rows) - -SELECT count(*) FROM test_update_trigger.test_hash; - count -------- - 10 -(1 row) - -DROP SCHEMA test_update_trigger CASCADE; -NOTICE: drop cascades to 18 other objects -DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_trigger.sql b/sql/pathman_update_trigger.sql deleted file mode 100644 index a5f5b10e..00000000 --- a/sql/pathman_update_trigger.sql +++ /dev/null @@ -1,164 +0,0 @@ -\set VERBOSITY terse - -SET search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; - - - -/* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); -SELECT create_update_triggers('test_update_trigger.test_range'); - - -/* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; - -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val < 10 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; - -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val > 20 AND val <= 30 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; - -/* Check values #3 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 90 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; - -/* Check values #4 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = -1 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' 
WHERE val = 100; - -/* Check values #5 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 100 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; - -/* Check values #6 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 70 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; - -/* Check values #7 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 65 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', - 101::NUMERIC, 111::NUMERIC); -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; - -/* Check values #8 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 105 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; - -/* Check values #9 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 115; - -SELECT count(*) FROM test_update_trigger.test_range; - - - -/* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); -SELECT create_update_triggers('test_update_trigger.test_hash'); - - -/* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; - -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 1 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_hash; - - -/* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; - -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 3 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_hash; - - - -DROP SCHEMA test_update_trigger CASCADE; -DROP EXTENSION pg_pathman; From 59ce6526e1eda45cd6037ad030692ce6ef38561e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 18:57:27 +0300 Subject: [PATCH 317/528] more tests for PartitionRouter, add FIXME --- Makefile | 1 + expected/pathman_update_triggers.out | 110 +++++++++++++++++++++++++++ sql/pathman_update_triggers.sql | 70 +++++++++++++++++ src/hooks.c | 1 + 4 files changed, 182 insertions(+) create mode 100644 expected/pathman_update_triggers.out create mode 100644 sql/pathman_update_triggers.sql diff --git a/Makefile b/Makefile index 44f80b79..f9567f94 100644 --- a/Makefile +++ b/Makefile @@ -55,6 +55,7 @@ REGRESS = pathman_array_qual \ pathman_runtime_nodes \ pathman_subpartitions \ pathman_update_node \ + pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ 
pathman_views diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out new file mode 100644 index 00000000..5c1092f2 --- /dev/null +++ b/expected/pathman_update_triggers.out @@ -0,0 +1,110 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +set pg_pathman.enable_partitionrouter = t; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_2) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_2) 
+NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT ROW (test_2) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT ROW (test_1) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) + val | tableoid +-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +DROP SCHEMA test_update_triggers CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql new file mode 100644 index 00000000..c289d12c --- /dev/null +++ b/sql/pathman_update_triggers.sql @@ -0,0 +1,70 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; + + + +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; + + +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + + +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); 
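+
+-- A cross-partition UPDATE fires BEFORE UPDATE on the source partition,
+-- then the DELETE + INSERT trigger pairs: PartitionRouter moves the row
+-- by deleting it from the old partition and re-inserting it into the new
+-- one (see the NOTICE output in the expected file above).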
+ + +insert into test_update_triggers.test values (1); + +set pg_pathman.enable_partitionrouter = t; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; + + + +DROP SCHEMA test_update_triggers CASCADE; +DROP EXTENSION pg_pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index 25a2ec5c..ac0e595e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -978,6 +978,7 @@ pathman_executor_hook(QueryDesc *queryDesc, PlanState *state = (PlanState *) queryDesc->planstate; + /* FIXME: we should modify ALL ModifyTable nodes! They might be hidden deeper. */ if (IsA(state, ModifyTableState)) { ModifyTableState *mt_state = (ModifyTableState *) state; From dde5eb2f9332011f4956637269391839599699c7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 30 Aug 2018 14:54:48 +0300 Subject: [PATCH 318/528] pass correct plan to BeginForeignModify (based on patch #171 by @arssher) --- src/partition_filter.c | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 57f153c2..80d6ecff 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -976,13 +976,18 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, if (fdw_routine->PlanForeignModify) { RangeTblEntry *rte; - ModifyTableState mtstate; - List *fdw_private; Query query; + PlanState pstate, + *pstate_ptr; + ModifyTableState mtstate; PlannedStmt *plan; + + /* This is the value we'd like to get */ + List *fdw_private; + TupleDesc tupdesc; - int i, - target_attr; + int target_attr, + i; /* Fetch RangeTblEntry for partition */ rte = rt_fetch(rri->ri_RangeTableIndex, estate->es_range_table); @@ -1033,26 +1038,33 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, target_attr++; } - /* Create fake ModifyTableState */ - memset((void *) &mtstate, 0, sizeof(ModifyTableState)); + /* HACK: plan a fake query for FDW access to be planned as well */ + elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); + plan = standard_planner(&query, 0, NULL); + + /* HACK: create a fake PlanState */ + memset(&pstate, 0, sizeof(PlanState)); + pstate.plan = plan->planTree; + pstate_ptr = &pstate; + + /* HACK: create a fake ModifyTableState */ + memset(&mtstate, 0, sizeof(ModifyTableState)); NodeSetTag(&mtstate, T_ModifyTableState); mtstate.ps.state = estate; mtstate.operation = CMD_INSERT; + mtstate.mt_plans = &pstate_ptr; + mtstate.mt_nplans = 1; + mtstate.mt_whichplan = 0; mtstate.resultRelInfo = rri; #if PG_VERSION_NUM < 110000 mtstate.mt_onconflict = ONCONFLICT_NONE; #endif - /* Plan fake query in for FDW access to be planned as well */ - elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); - plan = standard_planner(&query, 0, NULL); - /* Extract fdw_private from useless plan */ elog(DEBUG1, "FDW(%u): extract fdw_private", partid); - fdw_private = (List *) - linitial(((ModifyTable *) plan->planTree)->fdwPrivLists); + fdw_private = linitial(((ModifyTable *) plan->planTree)->fdwPrivLists); - /* call BeginForeignModify on 'rri' */ + /* HACK: call BeginForeignModify on 'rri' */ elog(DEBUG1, "FDW(%u): call BeginForeignModify on a fake INSERT node", partid); 
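+	/*
+	 * Presumably, the FDW (e.g. postgres_fdw) fetches the planned INSERT
+	 * subtree through the fake mt_plans[0] assembled above; that is why
+	 * the fake PlanState carries the freshly planned subtree.
+	 */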
fdw_routine->BeginForeignModify(&mtstate, rri, fdw_private, 0, 0); From 0feb47b94292437e32314db4e2f8b51fa2536331 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 3 Sep 2018 14:47:41 +0300 Subject: [PATCH 319/528] implement state_tree_visitor() --- src/hooks.c | 33 +----- src/include/partition_router.h | 2 + src/include/planner_tree_modification.h | 11 +- src/partition_router.c | 32 ++++++ src/planner_tree_modification.c | 143 ++++++++++++++++++++---- 5 files changed, 170 insertions(+), 51 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index ac0e595e..1ebb726b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -976,35 +976,10 @@ pathman_executor_hook(QueryDesc *queryDesc, #define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c)) #endif - PlanState *state = (PlanState *) queryDesc->planstate; - - /* FIXME: we should modify ALL ModifyTable nodes! They might be hidden deeper. */ - if (IsA(state, ModifyTableState)) - { - ModifyTableState *mt_state = (ModifyTableState *) state; - int i; - - for (i = 0; i < mt_state->mt_nplans; i++) - { - CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; - - /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pr_state) && - IsPartitionRouterState(linitial(pr_state->custom_ps))) - { - ResultRelInfo *rri = &mt_state->resultRelInfo[i]; - - /* - * HACK: We unset junkfilter to disable - * junk cleaning in ExecModifyTable. - */ - rri->ri_junkFilter = NULL; - - /* HACK: change UPDATE operation to INSERT */ - mt_state->operation = CMD_INSERT; - } - } - } + /* Prepare ModifyTable nodes for PartitionRouter hackery */ + state_tree_visitor((PlanState *) queryDesc->planstate, + prepare_modify_table_for_partition_router, + NULL); /* Call hooks set by other extensions if needed */ if (EXECUTOR_HOOK) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index a0ebf3dd..7c36641a 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -72,6 +72,8 @@ Plan *make_partition_router(Plan *subplan, int epq_param, List *returning_list); +void prepare_modify_table_for_partition_router(PlanState *state, void *context); + Node *partition_router_create_scan_state(CustomScan *node); diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 71fcf25d..b93224ba 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -25,9 +25,14 @@ void assign_query_id(Query *query); void reset_query_id_generator(void); /* Plan tree rewriting utility */ -void plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context); +void plan_tree_visitor(Plan *plan, + void (*visitor) (Plan *plan, void *context), + void *context); + +/* PlanState tree rewriting utility */ +void state_tree_visitor(PlanState *state, + void (*visitor) (PlanState *state, void *context), + void *context); /* Query tree rewriting utility */ void pathman_transform_query(Query *parse, ParamListInfo params); diff --git a/src/partition_router.c b/src/partition_router.c index 9c0a041e..27ce88d8 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -95,6 +95,38 @@ make_partition_router(Plan *subplan, return &cscan->scan.plan; } +void +prepare_modify_table_for_partition_router(PlanState *state, void *context) +{ + if (IsA(state, ModifyTableState)) + { + ModifyTableState *mt_state = (ModifyTableState *) state; + int i; + + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pr_state = 
(CustomScanState *) mt_state->mt_plans[i]; + + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pr_state) && + IsPartitionRouterState(linitial(pr_state->custom_ps))) + { + ResultRelInfo *rri = &mt_state->resultRelInfo[i]; + + /* + * HACK: We unset junkfilter to disable + * junk cleaning in ExecModifyTable. + */ + rri->ri_junkFilter = NULL; + + /* HACK: change UPDATE operation to INSERT */ + mt_state->operation = CMD_INSERT; + } + } + } +} + + Node * partition_router_create_scan_state(CustomScan *node) { diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 95706a7e..58c92bd3 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -113,6 +113,9 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); static void partition_router_visitor(Plan *plan, void *context); +static void state_visit_subplans(List *plans, void (*visitor) (), void *context); +static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (), void *context); + static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); static Node *adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); @@ -152,9 +155,9 @@ reset_query_id_generator(void) * 'visitor' is applied right before return. */ void -plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context) +plan_tree_visitor(Plan *plan, + void (*visitor) (Plan *plan, void *context), + void *context) { ListCell *l; @@ -167,50 +170,152 @@ plan_tree_walker(Plan *plan, switch (nodeTag(plan)) { case T_SubqueryScan: - plan_tree_walker(((SubqueryScan *) plan)->subplan, visitor, context); + plan_tree_visitor(((SubqueryScan *) plan)->subplan, visitor, context); break; case T_CustomScan: - foreach(l, ((CustomScan *) plan)->custom_plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((CustomScan *) plan)->custom_plans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_ModifyTable: foreach (l, ((ModifyTable *) plan)->plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_Append: - foreach(l, ((Append *) plan)->appendplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((Append *) plan)->appendplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_MergeAppend: - foreach(l, ((MergeAppend *) plan)->mergeplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((MergeAppend *) plan)->mergeplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_BitmapAnd: - foreach(l, ((BitmapAnd *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((BitmapAnd *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_BitmapOr: - foreach(l, ((BitmapOr *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((BitmapOr *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; default: break; } - plan_tree_walker(plan->lefttree, visitor, context); - plan_tree_walker(plan->righttree, visitor, context); + plan_tree_visitor(plan->lefttree, visitor, context); + plan_tree_visitor(plan->righttree, 
visitor, context); /* Apply visitor to the current node */ visitor(plan, context); } +void +state_tree_visitor(PlanState *state, + void (*visitor) (PlanState *plan, void *context), + void *context) +{ + Plan *plan; + ListCell *lc; + + if (state == NULL) + return; + + plan = state->plan; + + check_stack_depth(); + + /* Plan-type-specific fixes */ + switch (nodeTag(plan)) + { + case T_SubqueryScan: + state_tree_visitor(((SubqueryScanState *) state)->subplan, visitor, context); + break; + + case T_CustomScan: + foreach (lc, ((CustomScanState *) state)->custom_ps) + state_tree_visitor((PlanState *) lfirst(lc),visitor, context); + break; + + case T_ModifyTable: + state_visit_members(((ModifyTableState *) state)->mt_plans, + ((ModifyTableState *) state)->mt_nplans, + visitor, context); + break; + + case T_Append: + state_visit_members(((AppendState *) state)->appendplans, + ((AppendState *) state)->as_nplans, + visitor, context); + break; + + case T_MergeAppend: + state_visit_members(((MergeAppendState *) state)->mergeplans, + ((MergeAppendState *) state)->ms_nplans, + visitor, context); + break; + + case T_BitmapAnd: + state_visit_members(((BitmapAndState *) state)->bitmapplans, + ((BitmapAndState *) state)->nplans, + visitor, context); + break; + + case T_BitmapOr: + state_visit_members(((BitmapOrState *) state)->bitmapplans, + ((BitmapOrState *) state)->nplans, + visitor, context); + break; + + default: + break; + } + + state_visit_subplans(state->initPlan, visitor, context); + state_visit_subplans(state->subPlan, visitor, context); + + state_tree_visitor(state->lefttree, visitor, context); + state_tree_visitor(state->righttree, visitor, context); + + /* Apply visitor to the current node */ + visitor(state, context); +} + +/* + * Walk a list of SubPlans (or initPlans, which also use SubPlan nodes). + */ +static void +state_visit_subplans(List *plans, + void (*visitor) (), + void *context) +{ + ListCell *lc; + + foreach (lc, plans) + { + SubPlanState *sps = lfirst_node(SubPlanState, lc); + visitor(sps->planstate, context); + } +} + +/* + * Walk the constituent plans of a ModifyTable, Append, MergeAppend, + * BitmapAnd, or BitmapOr node. 
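+ * Unlike the List-based walks above, these members are stored in a
+ * plain (PlanState **) array, hence the explicit 'nplans' counter.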
+ */ +static void +state_visit_members(PlanState **planstates, int nplans, + void (*visitor) (), void *context) +{ + int i; + + for (i = 0; i < nplans; i++) + visitor(planstates[i], context); +} + /* * ------------------------------- @@ -586,7 +691,7 @@ void add_partition_filters(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_filter) - plan_tree_walker(plan, partition_filter_visitor, rtable); + plan_tree_visitor(plan, partition_filter_visitor, rtable); } /* Add PartitionRouter nodes to the plan tree */ @@ -594,7 +699,7 @@ void add_partition_routers(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_router) - plan_tree_walker(plan, partition_router_visitor, rtable); + plan_tree_visitor(plan, partition_router_visitor, rtable); } /* From 47633ba69ddc44be9266c0638f59bcf289de9c4a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Sep 2018 15:52:08 +0300 Subject: [PATCH 320/528] get rid of ugly ExecEvalExprCompat() macro --- src/compat/pg_compat.c | 11 ----------- src/include/compat/pg_compat.h | 24 +++++++++--------------- src/partition_filter.c | 3 +-- src/pg_pathman.c | 3 +-- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 5547231e..4bc021fd 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -118,17 +118,6 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) #endif -/* - * ExecEvalExpr - * - * global variables for macro wrapper evaluation - */ -#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 -Datum exprResult; -ExprDoneCond isDone; -#endif - - /* * get_all_actual_clauses */ diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f2d5ba63..5f1d59d4 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -367,26 +367,20 @@ extern void create_plain_partial_paths(PlannerInfo *root, * NOTE: 'errmsg' specifies error string when ExecEvalExpr returns multiple values. */ #if PG_VERSION_NUM >= 100000 -#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ +#define ExecEvalExprCompat(expr, econtext, isNull) \ ExecEvalExpr((expr), (econtext), (isNull)) #elif PG_VERSION_NUM >= 90500 +static inline Datum +ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) +{ + ExprDoneCond isdone; + Datum result = ExecEvalExpr(expr, econtext, isnull, &isdone); -/* Variables for ExecEvalExprCompat() */ -extern Datum exprResult; -extern ExprDoneCond isDone; + if (isdone != ExprSingleResult) + elog(ERROR, "expression should return single value"); -/* Error handlers */ -static inline void mult_result_handler() -{ - elog(ERROR, "partitioning expression should return single value"); + return result; } - -#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ -( \ - exprResult = ExecEvalExpr((expr), (econtext), (isNull), &isDone), \ - (isDone != ExprSingleResult) ? 
(errHandler)() : (0), \ - exprResult \ -) #endif diff --git a/src/partition_filter.c b/src/partition_filter.c index 80d6ecff..f51f7896 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -519,8 +519,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, expr_context->ecxt_scantuple = slot; /* Execute expression */ - value = ExecEvalExprCompat(expr_state, expr_context, - &isnull, mult_result_handler); + value = ExecEvalExprCompat(expr_state, expr_context, &isnull); if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 588f5417..69497f92 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -202,8 +202,7 @@ ExtractConst(Node *node, const WalkerContext *context) /* Evaluate expression */ estate = ExecInitExpr((Expr *) node, NULL); - value = ExecEvalExprCompat(estate, econtext, &isnull, - mult_result_handler); + value = ExecEvalExprCompat(estate, econtext, &isnull); #if PG_VERSION_NUM >= 100000 /* Free temp econtext if needed */ From d946697879864cbd940552e131560a95d2b8a046 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Sep 2018 16:10:59 +0300 Subject: [PATCH 321/528] WIP: now PartitionRouter is able to use both UPDATE & DELETE + INSERT --- src/include/compat/pg_compat.h | 40 +++- src/include/partition_router.h | 8 +- src/include/relation_info.h | 1 + src/partition_router.c | 343 +++++++++++++++++++------------- src/planner_tree_modification.c | 2 +- src/relation_info.c | 4 +- 6 files changed, 250 insertions(+), 148 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 5f1d59d4..978279d2 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -364,7 +364,6 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* * ExecEvalExpr() - * NOTE: 'errmsg' specifies error string when ExecEvalExpr returns multiple values. 
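+ * On 9.5/9.6 the inline wrapper below rejects set-valued results
+ * (anything other than ExprSingleResult) with an error.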
*/ #if PG_VERSION_NUM >= 100000 #define ExecEvalExprCompat(expr, econtext, isNull) \ @@ -384,6 +383,33 @@ ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) #endif +/* + * ExecCheck() + */ +#if PG_VERSION_NUM < 100000 +static inline bool +ExecCheck(ExprState *state, ExprContext *econtext) +{ + Datum ret; + bool isnull; + MemoryContext old_mcxt; + + /* short-circuit (here and in ExecInitCheck) for empty restriction list */ + if (state == NULL) + return true; + + old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + ret = ExecEvalExprCompat(state, econtext, &isnull); + MemoryContextSwitchTo(old_mcxt); + + if (isnull) + return true; + + return DatumGetBool(ret); +} +#endif + + /* * extract_actual_join_clauses() */ @@ -790,11 +816,15 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * heap_delete() */ #if PG_VERSION_NUM >= 110000 -#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ - heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd), false) +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd), (changing_part)) #else -#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ - heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd)) +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd)) #endif /* diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 7c36641a..f1526335 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -30,13 +30,15 @@ typedef struct PartitionRouterState { CustomScanState css; - Oid partitioned_table; - Plan *subplan; /* proxy variable to store subplan */ - JunkFilter *junkfilter; /* 'ctid' extraction facility */ + Plan *subplan; /* proxy variable to store subplan */ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ + ExprState *constraint; /* should tuple remain in partition? 
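+									 * (built lazily from the partition's
+									 * CHECK constraint by
+									 * router_lazy_init_constraint())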
*/ EPQState epqstate; int epqparam; + ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ + ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index d2a3d053..3a5f0fa8 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -367,6 +367,7 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartType expected_part_type); /* Bounds cache */ +Expr *get_partition_constraint_expr(Oid partition); void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); void invalidate_bounds_cache(void); diff --git a/src/partition_router.c b/src/partition_router.c index 27ce88d8..fc8b50ba 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -20,6 +20,7 @@ #include "commands/trigger.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" +#include "storage/bufmgr.h" #include "utils/guc.h" #include "utils/rel.h" @@ -28,10 +29,18 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static TupleTableSlot *router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate); + +static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate); +static void router_lazy_init_constraint(PartitionRouterState *state); + +static ItemPointerData router_extract_ctid(PartitionRouterState *state, + TupleTableSlot *slot); + +static TupleTableSlot *router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted, + EState *estate); void init_partition_router_static_data(void) @@ -105,22 +114,15 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) for (i = 0; i < mt_state->mt_nplans; i++) { - CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; + PartitionRouterState *pr_state; /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pr_state) && - IsPartitionRouterState(linitial(pr_state->custom_ps))) + if (IsPartitionFilterState(pf_state) && + IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) { - ResultRelInfo *rri = &mt_state->resultRelInfo[i]; - - /* - * HACK: We unset junkfilter to disable - * junk cleaning in ExecModifyTable. 
- */ - rri->ri_junkFilter = NULL; - - /* HACK: change UPDATE operation to INSERT */ - mt_state->operation = CMD_INSERT; + /* HACK: PartitionRouter might change ModifyTable's state */ + pr_state->mt_state = mt_state; } } } @@ -166,8 +168,8 @@ partition_router_exec(CustomScanState *node) { EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - TupleTableSlot *slot; PartitionRouterState *state = (PartitionRouterState *) node; + TupleTableSlot *slot; take_next_tuple: /* execute PartitionFilter child node */ @@ -175,63 +177,36 @@ partition_router_exec(CustomScanState *node) if (!TupIsNull(slot)) { - ResultRelInfo *current_rri = state->current_rri; - char relkind; + bool deleted; ItemPointerData ctid; ItemPointerSetInvalid(&ctid); - /* Build new junkfilter if we have to */ - if (state->junkfilter == NULL) - { - state->junkfilter = - ExecInitJunkFilter(state->subplan->targetlist, - current_rri->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlotCompat(estate)); - - state->junkfilter->jf_junkAttNo = - ExecFindJunkAttribute(state->junkfilter, "ctid"); - - if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - } - - /* Additional checks based on 'relkind' */ - relkind = current_rri->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) - { - Datum ctid_datum; - bool ctid_isnull; + /* Build new junkfilter lazily */ + router_lazy_init_junkfilter(state, estate); - ctid_datum = ExecGetJunkAttribute(slot, - state->junkfilter->jf_junkAttNo, - &ctid_isnull); + /* Build recheck constraint state lazily */ + router_lazy_init_constraint(state); - /* shouldn't ever get a null result... */ - if (ctid_isnull) - elog(ERROR, "ctid is NULL"); - - /* Get item pointer to tuple */ - ctid = *(ItemPointer) DatumGetPointer(ctid_datum); - } - else if (relkind == RELKIND_FOREIGN_TABLE) - elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); - else - elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); + /* Extract item pointer from current tuple */ + ctid = router_extract_ctid(state, slot); /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = current_rri; + estate->es_result_relation_info = state->current_rri; /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - slot = router_delete_tuple(slot, &ctid, &state->epqstate, estate); + slot = router_lock_or_delete_tuple(state, slot, &ctid, + &deleted, estate); /* We require a tuple */ if (TupIsNull(slot)) goto take_next_tuple; - /* Tuple will be inserted by ModifyTable */ - return ExecFilterJunk(state->junkfilter, slot); + /* HACK: change command type in ModifyTable */ + state->mt_state->operation = deleted ? 
CMD_INSERT : CMD_UPDATE; + + return slot; } return NULL; @@ -261,109 +236,205 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e } -/* - * ---------------------------------------------------------------- - * ExecDeleteInternal - * This is a modified copy of ExecDelete from executor/nodeModifyTable.c - * ---------------------------------------------------------------- - */ +static void +router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) +{ + Relation rel = state->current_rri->ri_RelationDesc; + + if (state->junkfilter == NULL) + { + state->junkfilter = + ExecInitJunkFilter(state->subplan->targetlist, + RelationGetDescr(rel)->tdhasoid, + ExecInitExtraTupleSlotCompat(estate)); + + state->junkfilter->jf_junkAttNo = + ExecFindJunkAttribute(state->junkfilter, "ctid"); + + if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } +} + +static void +router_lazy_init_constraint(PartitionRouterState *state) +{ + Relation rel = state->current_rri->ri_RelationDesc; + + if (state->constraint == NULL) + { + Expr *expr = get_partition_constraint_expr(RelationGetRelid(rel)); + state->constraint = ExecInitExpr(expr, NULL); + } +} + +/* Extract ItemPointer from tuple using JunkFilter */ +static ItemPointerData +router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot) +{ + Relation rel = state->current_rri->ri_RelationDesc; + char relkind = RelationGetForm(rel)->relkind; + + if (relkind == RELKIND_RELATION) + { + Datum ctid_datum; + bool ctid_isnull; + + ctid_datum = ExecGetJunkAttribute(slot, + state->junkfilter->jf_junkAttNo, + &ctid_isnull); + + /* shouldn't ever get a null result... */ + if (ctid_isnull) + elog(ERROR, "ctid is NULL"); + + /* Get item pointer to tuple */ + return *(ItemPointer) DatumGetPointer(ctid_datum); + } + else if (relkind == RELKIND_FOREIGN_TABLE) + elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); + else + elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); +} +/* This is a heavily modified copy of ExecDelete from nodeModifyTable.c */ static TupleTableSlot * -router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate) +router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted, /* return value #1 */ + EState *estate) { - ResultRelInfo *rri; - Relation rel; - HTSU_Result result; - HeapUpdateFailureData hufd; + ResultRelInfo *rri; + Relation rel; + + ExprContext *econtext = GetPerTupleExprContext(estate); + ExprState *constraint = state->constraint; + + HeapUpdateFailureData hufd; + HTSU_Result result; + EPQState *epqstate = &state->epqstate; + + LOCKMODE lockmode; + bool try_delete; + + *deleted = false; EvalPlanQualSetSlot(epqstate, slot); /* Get information on the (current) result relation */ rri = estate->es_result_relation_info; rel = rri->ri_RelationDesc; + lockmode = ExecUpdateLockMode(estate, rri); - /* BEFORE ROW UPDATE triggers */ - if (rri->ri_TrigDesc && - rri->ri_TrigDesc->trig_update_before_row) - { - slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); - if (TupIsNull(slot)) - return NULL; - } +recheck: + /* Does tuple still belong to current partition? 
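+	 * ExecCheck() evaluates the partition's CHECK constraint over the
+	 * new row values: if it no longer passes, the row is deleted here
+	 * and re-inserted into the proper partition by ModifyTable.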
*/ + econtext->ecxt_scantuple = slot; + try_delete = !ExecCheck(constraint, econtext); - /* BEFORE ROW DELETE triggers */ - if (rri->ri_TrigDesc && - rri->ri_TrigDesc->trig_delete_before_row) + /* Lock or delete tuple */ + if (try_delete) { - if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) - return NULL; - } + /* BEFORE ROW UPDATE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_update_before_row) + { + slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); + if (TupIsNull(slot)) + return NULL; + } + + /* BEFORE ROW DELETE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_delete_before_row) + { + if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) + return NULL; + } - if (tupleid != NULL) - { -ldelete: /* Delete the tuple */ result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, - true /* wait for commit */ , - &hufd); - switch (result) - { - case HeapTupleSelfUpdated: - if (hufd.cmax != estate->es_output_cid) - ereport(ERROR, - (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), - errmsg("tuple to be updated was already modified by an operation triggered by the current command"), - errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); - - /* Already deleted by self; nothing to do */ - return NULL; + true /* wait for commit */, &hufd, + true /* changing partition */); + } + else + { + HeapTupleData tuple; + Buffer buffer; + + tuple.t_self = *tupleid; + result = heap_lock_tuple(rel, &tuple, + estate->es_output_cid, + lockmode, LockWaitBlock, + false, &buffer, &hufd); + + ReleaseBuffer(buffer); + } - case HeapTupleMayBeUpdated: - break; + /* Check lock/delete status */ + switch (result) + { + case HeapTupleSelfUpdated: + if (hufd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Already deleted by self; nothing to do */ + return NULL; - case HeapTupleUpdated: - if (IsolationUsesXactSnapshot()) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); + case HeapTupleMayBeUpdated: + break; - if (!ItemPointerEquals(tupleid, &hufd.ctid)) + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + + if (!ItemPointerEquals(tupleid, &hufd.ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + rel, + rri->ri_RangeTableIndex, + LockTupleExclusive, + &hufd.ctid, + hufd.xmax); + + if (!TupIsNull(epqslot)) { - TupleTableSlot *epqslot; - - epqslot = EvalPlanQual(estate, - epqstate, - rel, - rri->ri_RangeTableIndex, - LockTupleExclusive, - &hufd.ctid, - hufd.xmax); - - if (!TupIsNull(epqslot)) - { - Assert(tupleid != NULL); - *tupleid = hufd.ctid; - slot = epqslot; - goto ldelete; - } + Assert(tupleid != NULL); + *tupleid = hufd.ctid; + slot = epqslot; + goto recheck; } + } - /* Tuple already deleted; nothing to do */ - return NULL; + /* Tuple already deleted; nothing to do */ + return NULL; - default: - elog(ERROR, "unrecognized heap_delete status: %u", result); - } + case HeapTupleInvisible: + elog(ERROR, 
"attempted to lock invisible tuple"); + break; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + break; } - else elog(ERROR, "tupleid should be specified for deletion"); - /* AFTER ROW DELETE triggers */ - ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); + /* Additional work for delete s*/ + if (try_delete) + { + /* AFTER ROW DELETE triggers */ + ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); + } + *deleted = try_delete; return slot; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 58c92bd3..b4baabfa 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -238,7 +238,7 @@ state_tree_visitor(PlanState *state, case T_CustomScan: foreach (lc, ((CustomScanState *) state)->custom_ps) - state_tree_visitor((PlanState *) lfirst(lc),visitor, context); + state_tree_visitor((PlanState *) lfirst(lc), visitor, context); break; case T_ModifyTable: diff --git a/src/relation_info.c b/src/relation_info.c index a18ceeec..25b86d31 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -150,8 +150,6 @@ static void resonwner_prel_callback(ResourceReleasePhase phase, bool isTopLevel, void *arg); -static Expr *get_partition_constraint_expr(Oid partition); - static void fill_prel_with_partitions(PartRelationInfo *prel, const Oid *partitions, const uint32 parts_count); @@ -1047,7 +1045,7 @@ invalidate_bounds_cache(void) * * build_check_constraint_name_internal() is used to build conname. */ -static Expr * +Expr * get_partition_constraint_expr(Oid partition) { Oid conid; /* constraint Oid */ From 65033edd831e028e56eb0e5402871c259b997d4e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 4 Sep 2018 17:01:02 +0300 Subject: [PATCH 322/528] Support create_append_path in PGPROEE11 --- src/include/compat/pg_compat.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f2d5ba63..f33d41cc 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -239,7 +239,9 @@ create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ (parallel_workers), false, NIL, -1) #else -/* TODO */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false, NIL) #endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 100000 From ffa6335b507069f15e8aec6416abcd82c3c3edf0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Sep 2018 22:42:06 +0300 Subject: [PATCH 323/528] WIP router_run_modify_table() hackery works in simple cases --- src/include/partition_router.h | 1 + src/partition_filter.c | 1 + src/partition_router.c | 142 ++++++++++++++++++++++++++++++--- 3 files changed, 131 insertions(+), 13 deletions(-) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index f1526335..ac1b3ea4 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -38,6 +38,7 @@ typedef struct PartitionRouterState int epqparam; ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ + TupleTableSlot *saved_slot; ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index f51f7896..9850dde1 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -771,6 +771,7 @@ partition_filter_exec(CustomScanState *node) if (!state->tup_convert_slot) 
state->tup_convert_slot = MakeTupleTableSlotCompat(); + /* TODO: why should we *always* set a new slot descriptor? */ ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); } diff --git a/src/partition_router.c b/src/partition_router.c index fc8b50ba..3106e487 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -24,12 +24,28 @@ #include "utils/guc.h" #include "utils/rel.h" + +#define MTHackField(mt_state, field) ( (mt_state)->field ) + + bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; +/* FIXME: replace this magic with a CustomScan */ +static ExecProcNodeMtd mt_method = NULL; + + +static TupleTableSlot *router_run_modify_table(PlanState *state); + +static TupleTableSlot *router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation); +static TupleTableSlot *router_get_slot(PartitionRouterState *state, + bool *should_process); + static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate); static void router_lazy_init_constraint(PartitionRouterState *state); @@ -110,6 +126,7 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) if (IsA(state, ModifyTableState)) { ModifyTableState *mt_state = (ModifyTableState *) state; + bool changed_method = false; int i; for (i = 0; i < mt_state->mt_nplans; i++) @@ -121,8 +138,19 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) if (IsPartitionFilterState(pf_state) && IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) { - /* HACK: PartitionRouter might change ModifyTable's state */ + /* HACK: point to ModifyTable in PartitionRouter */ pr_state->mt_state = mt_state; + + if (!changed_method) + { + if (!mt_method) + mt_method = state->ExecProcNodeReal; + + /* HACK: replace ModifyTable's execution method */ + ExecSetExecProcNode(state, router_run_modify_table); + + changed_method = true; + } } } } @@ -166,17 +194,18 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_router_exec(CustomScanState *node) { - EState *estate = node->ss.ps.state; - PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - PartitionRouterState *state = (PartitionRouterState *) node; - TupleTableSlot *slot; + EState *estate = node->ss.ps.state; + PartitionRouterState *state = (PartitionRouterState *) node; + TupleTableSlot *slot; + bool should_process; take_next_tuple: - /* execute PartitionFilter child node */ - slot = ExecProcNode(child_ps); + /* Get next tuple for processing */ + slot = router_get_slot(state, &should_process); - if (!TupIsNull(slot)) + if (should_process) { + CmdType new_cmd; bool deleted; ItemPointerData ctid; @@ -203,13 +232,14 @@ partition_router_exec(CustomScanState *node) if (TupIsNull(slot)) goto take_next_tuple; - /* HACK: change command type in ModifyTable */ - state->mt_state->operation = deleted ? CMD_INSERT : CMD_UPDATE; + /* Should we use UPDATE or DELETE + INSERT? */ + new_cmd = deleted ? 
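
/*
 * A sketch, not patch code: the method-swapping trick used by
 * prepare_modify_table_for_partition_router() above, boiled down.
 * ExecSetExecProcNode() (PG 10+) swaps a node's ExecProcNode
 * implementation; the original method is stashed so the wrapper can
 * delegate to it. 'wrapped_mt' and 'hijack_modify_table' are
 * illustrative names.
 */
#include "postgres.h"
#include "executor/executor.h"

static ExecProcNodeMtd saved_method = NULL;

static TupleTableSlot *
wrapped_mt(PlanState *ps)
{
	/* ...inspect or repair ModifyTable's state here... */
	return saved_method(ps);	/* delegate to the real ModifyTable code */
}

static void
hijack_modify_table(PlanState *ps)
{
	if (saved_method == NULL)
		saved_method = ps->ExecProcNodeReal;

	ExecSetExecProcNode(ps, wrapped_mt);
}
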
CMD_INSERT : CMD_UPDATE; - return slot; + /* Alter ModifyTable's state and return */ + return router_set_slot(state, slot, new_cmd); } - return NULL; + return slot; } void @@ -218,15 +248,20 @@ partition_router_end(CustomScanState *node) PartitionRouterState *state = (PartitionRouterState *) node; Assert(list_length(node->custom_ps) == 1); - EvalPlanQualEnd(&state->epqstate); ExecEndNode((PlanState *) linitial(node->custom_ps)); + + EvalPlanQualEnd(&state->epqstate); } void partition_router_rescan(CustomScanState *node) { + PartitionRouterState *state = (PartitionRouterState *) node; + Assert(list_length(node->custom_ps) == 1); ExecReScan((PlanState *) linitial(node->custom_ps)); + + state->saved_slot = NULL; } void @@ -236,6 +271,87 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e } +static TupleTableSlot * +router_run_modify_table(PlanState *state) +{ + ModifyTableState *mt_state; + TupleTableSlot *slot; + int mt_plans_old, + mt_plans_new; + + mt_state = (ModifyTableState *) state; + + mt_plans_old = MTHackField(mt_state, mt_nplans); + + /* Fetch next tuple */ + slot = mt_method(state); + + mt_plans_new = MTHackField(mt_state, mt_nplans); + + /* PartitionRouter asked us to restart */ + if (mt_plans_new != mt_plans_old) + { + int state_idx = mt_state->mt_whichplan - 1; + + /* HACK: partially restore ModifyTable's state */ + MTHackField(mt_state, mt_done) = false; + MTHackField(mt_state, mt_nplans) = mt_plans_old; + MTHackField(mt_state, mt_whichplan) = state_idx; + + /* Restart ModifyTable */ + return mt_method(state); + } + + return slot; +} + +static TupleTableSlot * +router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation) +{ + ModifyTableState *mt_state = state->mt_state; + + Assert(!TupIsNull(slot)); + + if (mt_state->operation == operation) + return slot; + + /* HACK: alter ModifyTable's state */ + MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; + MTHackField(mt_state, operation) = operation; + + /* Set saved_slot and yield */ + state->saved_slot = slot; + return NULL; +} + +static TupleTableSlot * +router_get_slot(PartitionRouterState *state, + bool *should_process) +{ + TupleTableSlot *slot; + + if (!TupIsNull(state->saved_slot)) + { + /* Reset saved_slot */ + slot = state->saved_slot; + state->saved_slot = NULL; + + /* We shouldn't process preserved slot... */ + *should_process = false; + } + else + { + slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); + + /* But we have to process non-empty slot */ + *should_process = !TupIsNull(slot); + } + + return slot; +} + static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) { From 8e92ca1ba42b025089a6f613e6712404ac297727 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 14:35:57 +0300 Subject: [PATCH 324/528] WIP conditionally disable junk filter --- src/include/partition_router.h | 5 ++--- src/partition_router.c | 24 +++++++++--------------- 2 files changed, 11 insertions(+), 18 deletions(-) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index ac1b3ea4..79ae71a3 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -31,16 +31,15 @@ typedef struct PartitionRouterState CustomScanState css; Plan *subplan; /* proxy variable to store subplan */ - JunkFilter *junkfilter; /* 'ctid' extraction facility */ ExprState *constraint; /* should tuple remain in partition? 
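
/*
 * A sketch, assuming the fields shown in this patch: 'saved_slot' acts as a
 * one-element queue between PartitionRouter and ModifyTable. The producer
 * stashes a tuple and yields NULL so ModifyTable can be restarted; the
 * consumer prefers the stash over pulling a fresh tuple. The real functions
 * are router_set_slot() and router_get_slot() in src/partition_router.c;
 * these names are illustrative.
 */
static TupleTableSlot *
stash_tuple(PartitionRouterState *state, TupleTableSlot *slot)
{
	state->saved_slot = slot;
	return NULL;				/* yield; the tuple survives the restart */
}

static TupleTableSlot *
next_tuple(PartitionRouterState *state)
{
	TupleTableSlot *slot = state->saved_slot;

	if (slot != NULL)
		state->saved_slot = NULL;	/* consume the stash */
	else
		slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps));

	return slot;
}
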
*/ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ + ResultRelInfo *current_rri; EPQState epqstate; int epqparam; ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ TupleTableSlot *saved_slot; - - ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/partition_router.c b/src/partition_router.c index 3106e487..968d5a3e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -291,7 +291,7 @@ router_run_modify_table(PlanState *state) /* PartitionRouter asked us to restart */ if (mt_plans_new != mt_plans_old) { - int state_idx = mt_state->mt_whichplan - 1; + int state_idx = -mt_plans_new; /* HACK: partially restore ModifyTable's state */ MTHackField(mt_state, mt_done) = false; @@ -312,7 +312,9 @@ router_set_slot(PartitionRouterState *state, { ModifyTableState *mt_state = state->mt_state; + /* Check invariants */ Assert(!TupIsNull(slot)); + Assert(state->junkfilter); if (mt_state->operation == operation) return slot; @@ -321,6 +323,11 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; + /* HACK: conditionally disable junk filter in result relation */ + state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? + state->junkfilter : + NULL; + /* Set saved_slot and yield */ state->saved_slot = slot; return NULL; @@ -355,21 +362,8 @@ router_get_slot(PartitionRouterState *state, static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) { - Relation rel = state->current_rri->ri_RelationDesc; - if (state->junkfilter == NULL) - { - state->junkfilter = - ExecInitJunkFilter(state->subplan->targetlist, - RelationGetDescr(rel)->tdhasoid, - ExecInitExtraTupleSlotCompat(estate)); - - state->junkfilter->jf_junkAttNo = - ExecFindJunkAttribute(state->junkfilter, "ctid"); - - if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - } + state->junkfilter = state->current_rri->ri_junkFilter; } static void From 2385cdd6192c60e3acd33c82a8e8af01842c0167 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 14:36:21 +0300 Subject: [PATCH 325/528] add approved tests --- expected/pathman_update_triggers.out | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out index 5c1092f2..6b366fb3 100644 --- a/expected/pathman_update_triggers.out +++ b/expected/pathman_update_triggers.out @@ -52,10 +52,7 @@ NOTICE: AFTER INSERT ROW (test_1) set pg_pathman.enable_partitionrouter = t; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; NOTICE: BEFORE UPDATE ROW (test_1) -NOTICE: BEFORE DELETE ROW (test_1) -NOTICE: BEFORE INSERT ROW (test_1) -NOTICE: AFTER DELETE ROW (test_1) -NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) val | tableoid -----+----------------------------- 2 | test_update_triggers.test_1 @@ -74,10 +71,7 @@ NOTICE: AFTER INSERT ROW (test_2) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; NOTICE: BEFORE UPDATE ROW (test_2) -NOTICE: BEFORE DELETE ROW (test_2) -NOTICE: BEFORE INSERT ROW (test_2) -NOTICE: AFTER DELETE ROW (test_2) -NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE ROW (test_2) val | tableoid -----+----------------------------- 4 | test_update_triggers.test_2 @@ -96,10 +90,7 @@ NOTICE: AFTER INSERT ROW (test_1) update test_update_triggers.test set val 
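
/*
 * A sketch of a standard executor idiom, not patch code: how 'ctid' is
 * pulled out of a plan slot through the JunkFilter that the router now
 * borrows from its result relation (ri_junkFilter) instead of building one
 * itself. The helper name is illustrative.
 */
static ItemPointerData
fetch_tuple_ctid(JunkFilter *junkfilter, TupleTableSlot *slot)
{
	Datum	datum;
	bool	isnull;

	datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, &isnull);
	if (isnull)
		elog(ERROR, "ctid should not be NULL");

	/* The junk attribute stores a pointer to the tuple's TID */
	return *((ItemPointer) DatumGetPointer(datum));
}
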
= val + 1 returning *, tableoid::regclass; NOTICE: BEFORE UPDATE ROW (test_1) -NOTICE: BEFORE DELETE ROW (test_1) -NOTICE: BEFORE INSERT ROW (test_1) -NOTICE: AFTER DELETE ROW (test_1) -NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) val | tableoid -----+----------------------------- 6 | test_update_triggers.test_1 From 42e2c10cf73ce5d38673b788d716fc3f40727adb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 15:04:29 +0300 Subject: [PATCH 326/528] WIP fix router_run_modify_table(), more tests --- expected/pathman_update_node.out | 26 ++++++++++++++++++++++++++ sql/pathman_update_node.sql | 15 +++++++++++++++ src/partition_router.c | 8 ++++++-- 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index e68bb9ae..4f379e05 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -374,6 +374,32 @@ SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); 3 (1 row) +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + tableoid | val | comment +------------------------------+-----+--------- + test_update_node.test_hash_2 | 10 | 1 + test_update_node.test_hash_1 | 11 | 2 + test_update_node.test_hash_1 | 12 | 3 + test_update_node.test_hash_2 | 13 | 4 + test_update_node.test_hash_1 | 14 | 5 + test_update_node.test_hash_1 | 15 | 6 + test_update_node.test_hash_2 | 16 | 7 + test_update_node.test_hash_0 | 17 | 8 + test_update_node.test_hash_1 | 18 | 9 + test_update_node.test_hash_0 | 19 | 10 +(10 rows) + /* Move all rows into single partition */ UPDATE test_update_node.test_hash SET val = 1; /* Check values #1 */ diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index aff7f8ec..2c7e97f7 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -174,6 +174,21 @@ INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; + +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + + /* Move all rows into single partition */ UPDATE test_update_node.test_hash SET val = 1; diff --git a/src/partition_router.c b/src/partition_router.c index 968d5a3e..3348adb7 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -281,16 +281,20 @@ router_run_modify_table(PlanState *state) mt_state = (ModifyTableState *) 
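
/*
 * A sketch, not patch code: ModifyTable offers no hook for "please re-run
 * the current subplan", so the router smuggles a signal through mt_nplans
 * by storing the negated current plan index there. Decoding that signal,
 * with an illustrative helper name:
 */
static bool
restart_requested(int nplans_before, int nplans_now, int *resume_plan)
{
	if (nplans_now == nplans_before)
		return false;			/* no signal; normal execution */

	/* The negated value encodes the subplan to resume */
	*resume_plan = -nplans_now;
	return true;
}
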
state; + /* Get initial signal */ mt_plans_old = MTHackField(mt_state, mt_nplans); +restart: /* Fetch next tuple */ slot = mt_method(state); + /* Get current signal */ mt_plans_new = MTHackField(mt_state, mt_nplans); - /* PartitionRouter asked us to restart */ + /* Did PartitionRouter ask us to restart? */ if (mt_plans_new != mt_plans_old) { + /* Signal points to current plan */ int state_idx = -mt_plans_new; /* HACK: partially restore ModifyTable's state */ @@ -299,7 +303,7 @@ router_run_modify_table(PlanState *state) MTHackField(mt_state, mt_whichplan) = state_idx; /* Restart ModifyTable */ - return mt_method(state); + goto restart; } return slot; From c3399f3d123e9a6bdddf4e39aa29558b638d3045 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 15:15:58 +0300 Subject: [PATCH 327/528] WIP add comments here and there --- src/partition_router.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index 3348adb7..56008743 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -223,12 +223,12 @@ partition_router_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = state->current_rri; - /* Delete tuple from old partition */ + /* Lock or delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); slot = router_lock_or_delete_tuple(state, slot, &ctid, &deleted, estate); - /* We require a tuple */ + /* We require a tuple (previous one has vanished) */ if (TupIsNull(slot)) goto take_next_tuple; @@ -265,12 +265,15 @@ partition_router_rescan(CustomScanState *node) } void -partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es) +partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) { /* Nothing to do here now */ } +/* Smart wrapper over ModifyTable */ static TupleTableSlot * router_run_modify_table(PlanState *state) { @@ -309,6 +312,7 @@ router_run_modify_table(PlanState *state) return slot; } +/* Return tuple OR stash it and change ModifyTable's operation */ static TupleTableSlot * router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, @@ -337,6 +341,7 @@ router_set_slot(PartitionRouterState *state, return NULL; } +/* Fetch next tuple (either fresh or stashed) */ static TupleTableSlot * router_get_slot(PartitionRouterState *state, bool *should_process) From 0927b9fc57eb040ca9a6aef089a9c7d85f35ee4e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 18:05:36 +0300 Subject: [PATCH 328/528] EPQ: fix multilevel (see router_lazy_init_constraint()) --- src/include/relation_info.h | 2 +- src/partition_router.c | 30 ++++++++++++++++++++++++++---- src/relation_info.c | 20 +++++++++++--------- 3 files changed, 38 insertions(+), 14 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 3a5f0fa8..f3faa3d3 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -367,9 +367,9 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartType expected_part_type); /* Bounds cache */ -Expr *get_partition_constraint_expr(Oid partition); void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Expr *get_partition_constraint_expr(Oid partition, bool raise_error); void invalidate_bounds_cache(void); /* Parents cache */ diff --git a/src/partition_router.c b/src/partition_router.c index 56008743..64feddd9 100644 --- 
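
/*
 * A sketch of the multilevel recheck idea introduced below, assuming the
 * two pg_pathman lookups shown in the patch. With subpartitions, a moved
 * row must be rechecked against its own partition's CHECK constraint and
 * that of every ancestor, so the constraints are collected bottom-up and
 * ANDed together; expression_planner() readies each expression for
 * execution. The function name is illustrative.
 */
static ExprState *
build_recheck_constraint(Oid partition_relid)
{
	List   *clauses = NIL;
	Oid		relid = partition_relid;

	while (OidIsValid(relid))
	{
		Expr   *expr = get_partition_constraint_expr(relid, false);

		if (expr == NULL)
			break;				/* topmost parent reached */

		clauses = lappend(clauses, expression_planner(expr));
		relid = get_parent_of_partition(relid);		/* climb one level */
	}

	/* AND everything together and compile it */
	return ExecInitExpr(make_ands_explicit(clauses), NULL);
}
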
a/src/partition_router.c +++ b/src/partition_router.c @@ -20,6 +20,7 @@ #include "commands/trigger.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" +#include "optimizer/clauses.h" #include "storage/bufmgr.h" #include "utils/guc.h" #include "utils/rel.h" @@ -378,12 +379,33 @@ router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) static void router_lazy_init_constraint(PartitionRouterState *state) { - Relation rel = state->current_rri->ri_RelationDesc; - if (state->constraint == NULL) { - Expr *expr = get_partition_constraint_expr(RelationGetRelid(rel)); - state->constraint = ExecInitExpr(expr, NULL); + Relation rel = state->current_rri->ri_RelationDesc; + Oid relid = RelationGetRelid(rel); + List *clauses = NIL; + Expr *expr; + + while (OidIsValid(relid)) + { + /* It's probably OK if expression is NULL */ + expr = get_partition_constraint_expr(relid, false); + expr = expression_planner(expr); + + if (!expr) + break; + + /* Add this constraint to set */ + clauses = lappend(clauses, expr); + + /* Consider parent's check constraint as well */ + relid = get_parent_of_partition(relid); + } + + if (!clauses) + elog(ERROR, "no recheck constraint for relid %d", relid); + + state->constraint = ExecInitExpr(make_ands_explicit(clauses), NULL); } } diff --git a/src/relation_info.c b/src/relation_info.c index 25b86d31..386008d2 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1000,7 +1000,7 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) pbin_local.byval = prel->ev_byval; /* Try to build constraint's expression tree (may emit ERROR) */ - con_expr = get_partition_constraint_expr(partition); + con_expr = get_partition_constraint_expr(partition, true); /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ fill_pbin_with_bounds(&pbin_local, prel, con_expr); @@ -1046,7 +1046,7 @@ invalidate_bounds_cache(void) * build_check_constraint_name_internal() is used to build conname. 
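
/*
 * A sketch, not patch code: the 'raise_error' flag added below follows
 * PostgreSQL's usual "missing_ok" idiom, letting one lookup serve both
 * strict callers (cache population, which must fail loudly) and tolerant
 * callers (the router's recheck, which simply stops climbing).
 */
static void
constraint_lookup_examples(Oid partition)
{
	/* strict: ereport(ERROR) on a missing or broken constraint */
	Expr   *strict_expr = get_partition_constraint_expr(partition, true);

	/* tolerant: NULL means "no constraint here"; the caller decides */
	Expr   *maybe_expr = get_partition_constraint_expr(partition, false);

	(void) strict_expr;
	(void) maybe_expr;
}
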
*/ Expr * -get_partition_constraint_expr(Oid partition) +get_partition_constraint_expr(Oid partition, bool raise_error) { Oid conid; /* constraint Oid */ char *conname; /* constraint name */ @@ -1060,11 +1060,12 @@ get_partition_constraint_expr(Oid partition) if (!OidIsValid(conid)) { - DisablePathman(); /* disable pg_pathman since config is broken */ + if (!raise_error) + return NULL; + ereport(ERROR, (errmsg("constraint \"%s\" of partition \"%s\" does not exist", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); + conname, get_rel_name_or_relid(partition)))); } con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); @@ -1073,11 +1074,12 @@ get_partition_constraint_expr(Oid partition) &conbin_isnull); if (conbin_isnull) { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(WARNING, + if (!raise_error) + return NULL; + + ereport(ERROR, (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); + conname, get_rel_name_or_relid(partition)))); pfree(conname); return NULL; /* could not parse */ From baf8fc2fd85d3754fd671ca45c0d773c92841642 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 7 Sep 2018 16:57:49 +0300 Subject: [PATCH 329/528] optimize find_deepest_partition() for single tables --- src/planner_tree_modification.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 58c92bd3..ab84b254 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -885,7 +885,8 @@ modifytable_contains_fdw(List *rtable, ModifyTable *node) /* * Find a single deepest subpartition using quals. - * Return InvalidOid if it's not possible. + * It's always better to narrow down the set of tables to be scanned. + * Return InvalidOid if it's not possible (e.g. table is not partitioned). */ static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals) @@ -931,8 +932,13 @@ find_deepest_partition(Oid relid, Index rti, Expr *quals) Oid *children = PrelGetChildrenArray(prel), child = children[irange_lower(irange)]; + /* Scan this partition */ + result = child; + /* Try to go deeper and see if there are subpartitions */ - result = find_deepest_partition(child, rti, quals); + child = find_deepest_partition(child, rti, quals); + if (OidIsValid(child)) + result = child; } break; @@ -943,8 +949,6 @@ find_deepest_partition(Oid relid, Index rti, Expr *quals) /* Don't forget to close 'prel'! 
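
/*
 * A sketch of the refinement above, not patch code: the narrowing stays
 * greedy. Remember the matching child first, then overwrite it only if the
 * recursive call into that child (a subpartitioned table) finds something
 * deeper. The helper name is illustrative.
 */
static Oid
narrow_to_deepest(Oid child, Index rti, Expr *quals)
{
	Oid		result = child;		/* this partition already matches the quals */
	Oid		deeper = find_deepest_partition(child, rti, quals);

	if (OidIsValid(deeper))
		result = deeper;		/* a subpartition matches too; prefer it */

	return result;
}
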
*/ close_pathman_relation_info(prel); } - /* Otherwise, return this table */ - else result = relid; return result; } From 7a4aa43adebce49e0c3383dbb945d9cbaebff529 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 10 Sep 2018 20:36:27 +0300 Subject: [PATCH 330/528] sometimes break saves the day (issue #174) --- src/nodes_common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nodes_common.c b/src/nodes_common.c index f9f394ec..5f0c0c14 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -159,6 +159,7 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) { tlist_var->varattno = attnum; found_column = true; /* successful mapping */ + break; } } From 906dafbd86189dc4866268bf5159b39d7e2b4cce Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 16:56:32 +0300 Subject: [PATCH 331/528] fix CustomEvalParamExternCompat(), many thanks to Alexander Kuzmenkov --- src/include/compat/pg_compat.h | 8 ++++---- src/planner_tree_modification.c | 5 ++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f33d41cc..17f037cd 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -864,15 +864,15 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* See ExecEvalParamExtern() */ static inline ParamExternData * -CustomEvalParamExternCompat(Param *param, ParamListInfo params) +CustomEvalParamExternCompat(Param *param, + ParamListInfo params, + ParamExternData *prmdata) { ParamExternData *prm; #if PG_VERSION_NUM >= 110000 - ParamExternData prmdata; - if (params->paramFetch != NULL) - prm = params->paramFetch(params, param->paramid, false, &prmdata); + prm = params->paramFetch(params, param->paramid, false, prmdata); else prm = ¶ms->params[param->paramid - 1]; #else diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ab84b254..ee05108a 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -971,7 +971,10 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) param->paramid > 0 && param->paramid <= params->numParams) { - ParamExternData *prm = CustomEvalParamExternCompat(param, params); + ParamExternData prmdata; /* storage for 'prm' (PG 11) */ + ParamExternData *prm = CustomEvalParamExternCompat(param, + params, + &prmdata); if (OidIsValid(prm->ptype)) { From 063114712b3426ea0f8fcb072b8537c452f6fd7c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 18:06:16 +0300 Subject: [PATCH 332/528] run 11-based builds (Travis CI) --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index db2eebc9..946eb606 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ notifications: on_failure: always env: + - PG_VERSION=11 LEVEL=hardcore + - PG_VERSION=11 - PG_VERSION=10 LEVEL=hardcore - PG_VERSION=10 - PG_VERSION=9.6 LEVEL=hardcore From b0eefc5e69fbac72e5176c8d013af7511386e382 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 18:30:12 +0300 Subject: [PATCH 333/528] add test variant for PG 10 and PG 11 --- expected/pathman_basic_1.out | 1845 ++++++++++++++++++++++++++++++++++ 1 file changed, 1845 insertions(+) create mode 100644 expected/pathman_basic_1.out diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out new file mode 100644 index 00000000..692de996 --- /dev/null +++ b/expected/pathman_basic_1.out @@ -0,0 +1,1845 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; 
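
/*
 * A note on the paramFetch fix in patch 331 above, with a condensed sketch.
 * In PG 11 the fetch hook may materialize the parameter into
 * caller-supplied workspace and return a pointer to it; the old compat
 * wrapper kept that workspace in a local variable inside the inline
 * function, so the returned pointer could dangle. Moving the
 * ParamExternData buffer out to the caller is what makes it safe. The
 * helper name is illustrative.
 */
static ParamExternData *
fetch_extern_param(ParamListInfo params, Param *param,
				   ParamExternData *workspace)
{
	if (params->paramFetch != NULL)
		return params->paramFetch(params, param->paramid, false, workspace);

	/* No hook: the value is stored inline in the ParamListInfo */
	return &params->params[param->paramid - 1];
}
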
+CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT 
pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(3 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN 
(COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... 
*/ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN 
(COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on range_rel_2 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN 
+------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on range_rel_2 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS 
OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(4 rows) + +/* + * Join + */ +set enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 +(20 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------------- + Limit + -> Append + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +-------------------------------------- + Limit + -> Append + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(4 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM 
test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(3 rows) + +SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition +------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_6 +(2 rows) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_7 +(2 rows) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN 
'2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(3 rows) + +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have 
a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT 
pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 123 | 456 | 789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from 
test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 + drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE UNLOGGED TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(3 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(3 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * 
Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval | cooked_expr +----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- + test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval | cooked_expr +---------+------+----------+----------------+------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition +------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || 
currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval | cooked_expr +--------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- + test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +------------------------------------------------------ + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on range_rel_15 + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_13 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 +(4 rows) + +/* Create range partitions from whole range */ +SELECT 
drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using 
special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 28 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; From 2ee5d316155b5445a2c67b01a28cd034f2593d1d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 18:47:21 +0300 Subject: [PATCH 334/528] build FDW in hardcore mode --- run_tests.sh | 7 ++++++- tests/python/partitioning_test.py | 9 +++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index d0581e7f..82d1f9d3 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -45,13 +45,18 @@ if [ "$LEVEL" = 
"hardcore" ] || \ # enable additional options ./configure \ - CFLAGS='-O0 -ggdb3 -fno-omit-frame-pointer' \ + CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ --enable-cassert \ --prefix=$CUSTOM_PG_BIN \ --quiet + # build & install PG time make -s -j$(nproc) && make -s install + # build & install FDW + time make -s -C contrib/postgres_fdw -j$(nproc) && \ + make -s -C contrib/postgres_fdw install + # override default PostgreSQL instance export PATH=$CUSTOM_PG_BIN/bin:$PATH export LD_LIBRARY_PATH=$CUSTOM_PG_BIN/lib diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index e234f7ff..f2b2ea51 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -76,10 +76,7 @@ def is_postgres_fdw_ready(): select count(*) from pg_available_extensions where name = 'postgres_fdw' """) - if result[0][0] > 0: - return True - - return False + return result[0][0] > 0 class Tests(unittest.TestCase): @@ -334,7 +331,7 @@ def check_tablespace(node, tablename, tablespace): self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') def test_foreign_table(self): """ Test foreign tables """ @@ -427,7 +424,7 @@ def test_foreign_table(self): b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') master.safe_psql("select drop_partitions('hash_test')") - @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') def test_parallel_nodes(self): """ Test parallel queries under partitions """ From 801f2ae4b9114af012a1ee2f59da60f42d44419c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 20:53:13 +0300 Subject: [PATCH 335/528] PartitionRouter supports AFTER STATEMENT triggers --- expected/pathman_update_triggers.out | 90 +++++++++++++++++++++- sql/pathman_update_triggers.sql | 76 ++++++++++++++++++- src/include/partition_router.h | 11 ++- src/partition_filter.c | 3 +- src/partition_router.c | 107 +++++++++++++++++++-------- 5 files changed, 249 insertions(+), 38 deletions(-) diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out index 6b366fb3..d5c92b9f 100644 --- a/expected/pathman_update_triggers.out +++ b/expected/pathman_update_triggers.out @@ -22,6 +22,76 @@ begin return new; end if; end; $$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + 
execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ create trigger bu before update ON test_update_triggers.test_1 for each row execute procedure test_update_triggers.test_trigger (); create trigger bd before delete ON test_update_triggers.test_1 @@ -46,56 +116,74 @@ create trigger ad after delete ON test_update_triggers.test_2 for each row execute procedure test_update_triggers.test_trigger (); create trigger ai after insert ON test_update_triggers.test_2 for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) NOTICE: BEFORE INSERT ROW (test_1) NOTICE: AFTER INSERT ROW (test_1) -set pg_pathman.enable_partitionrouter = t; +NOTICE: AFTER INSERT STATEMENT (test) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_1) NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 2 | test_update_triggers.test_1 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_1) NOTICE: BEFORE DELETE ROW (test_1) NOTICE: BEFORE INSERT 
ROW (test_2) NOTICE: AFTER DELETE ROW (test_1) NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 3 | test_update_triggers.test_2 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_2) NOTICE: AFTER UPDATE ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 4 | test_update_triggers.test_2 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_2) NOTICE: BEFORE DELETE ROW (test_2) NOTICE: BEFORE INSERT ROW (test_1) NOTICE: AFTER DELETE ROW (test_2) NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 5 | test_update_triggers.test_1 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_1) NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 6 | test_update_triggers.test_1 (1 row) +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + DROP SCHEMA test_update_triggers CASCADE; NOTICE: drop cascades to 4 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql index c289d12c..e8405acb 100644 --- a/sql/pathman_update_triggers.sql +++ b/sql/pathman_update_triggers.sql @@ -25,6 +25,79 @@ begin $$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; + + +/* + * Statement level triggers + */ + +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure 
test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + + +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); + +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; + +select count(distinct val) from test_update_triggers.test; + + +truncate test_update_triggers.test; + + +/* + * Row level triggers + */ + create trigger bu before update ON test_update_triggers.test_1 for each row execute procedure test_update_triggers.test_trigger (); create trigger bd before delete ON test_update_triggers.test_1 @@ -55,15 +128,16 @@ create trigger ai after insert ON test_update_triggers.test_2 for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ insert into test_update_triggers.test values (1); -set pg_pathman.enable_partitionrouter = t; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +select count(distinct val) from test_update_triggers.test; DROP SCHEMA test_update_triggers CASCADE; diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 79ae71a3..683af938 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -35,11 +35,18 @@ typedef struct PartitionRouterState JunkFilter *junkfilter; /* 'ctid' extraction facility */ ResultRelInfo *current_rri; + /* Machinery required for EvalPlanQual */ EPQState epqstate; int epqparam; - ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ - TupleTableSlot *saved_slot; + /* Preserved slot from last call */ + bool yielded; + TupleTableSlot *yielded_slot; + + /* Need these for a GREAT deal of hackery */ + ModifyTableState *mt_state; + bool update_stmt_triggers, + insert_stmt_triggers; } PartitionRouterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 9850dde1..098a72a5 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -801,8 +801,7 @@ partition_filter_end(CustomScanState *node) void partition_filter_rescan(CustomScanState *node) { - Assert(list_length(node->custom_ps) == 1); - ExecReScan((PlanState *) linitial(node->custom_ps)); + elog(ERROR, "partition_filter_rescan is not implemented"); } void diff --git a/src/partition_router.c b/src/partition_router.c index 64feddd9..b746765e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -26,8 +26,38 @@ #include "utils/rel.h" +/* Highlight hacks with ModifyTable's fields */ #define MTHackField(mt_state, field) ( (mt_state)->field ) +/* Is current plan the last one? 
*/ +#define MTIsLastPlan(mt_state) ( (mt_state)->mt_whichplan == (mt_state)->mt_nplans - 1 ) + + +#define MTDisableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + (pr_state)->insert_stmt_triggers |= triggers->trig_insert_after_statement; \ + (pr_state)->update_stmt_triggers |= triggers->trig_update_after_statement; \ + triggers->trig_insert_after_statement = false; \ + triggers->trig_update_after_statement = false; \ + } \ + } while (0) + +#define MTEnableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + triggers->trig_insert_after_statement = (pr_state)->insert_stmt_triggers; \ + triggers->trig_update_after_statement = (pr_state)->update_stmt_triggers; \ + } \ + } while (0) + + bool pg_pathman_enable_partition_router = true; @@ -47,7 +77,7 @@ static TupleTableSlot *router_set_slot(PartitionRouterState *state, static TupleTableSlot *router_get_slot(PartitionRouterState *state, bool *should_process); -static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate); +static void router_lazy_init_junkfilter(PartitionRouterState *state); static void router_lazy_init_constraint(PartitionRouterState *state); static ItemPointerData router_extract_ctid(PartitionRouterState *state, @@ -56,8 +86,7 @@ static ItemPointerData router_extract_ctid(PartitionRouterState *state, static TupleTableSlot *router_lock_or_delete_tuple(PartitionRouterState *state, TupleTableSlot *slot, ItemPointer tupleid, - bool *deleted, - EState *estate); + bool *deleted); void init_partition_router_static_data(void) @@ -213,7 +242,7 @@ partition_router_exec(CustomScanState *node) ItemPointerSetInvalid(&ctid); /* Build new junkfilter lazily */ - router_lazy_init_junkfilter(state, estate); + router_lazy_init_junkfilter(state); /* Build recheck constraint state lazily */ router_lazy_init_constraint(state); @@ -226,8 +255,8 @@ partition_router_exec(CustomScanState *node) /* Lock or delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - slot = router_lock_or_delete_tuple(state, slot, &ctid, - &deleted, estate); + slot = router_lock_or_delete_tuple(state, slot, + &ctid, &deleted); /* We require a tuple (previous one has vanished) */ if (TupIsNull(slot)) @@ -257,12 +286,7 @@ partition_router_end(CustomScanState *node) void partition_router_rescan(CustomScanState *node) { - PartitionRouterState *state = (PartitionRouterState *) node; - - Assert(list_length(node->custom_ps) == 1); - ExecReScan((PlanState *) linitial(node->custom_ps)); - - state->saved_slot = NULL; + elog(ERROR, "partition_router_rescan is not implemented"); } void @@ -313,18 +337,15 @@ router_run_modify_table(PlanState *state) return slot; } -/* Return tuple OR stash it and change ModifyTable's operation */ +/* Return tuple OR yield it and change ModifyTable's operation */ static TupleTableSlot * router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, CmdType operation) { - ModifyTableState *mt_state = state->mt_state; - - /* Check invariants */ - Assert(!TupIsNull(slot)); - Assert(state->junkfilter); + ModifyTableState *mt_state = state->mt_state; + /* Fast path for correct operation type */ if (mt_state->operation == operation) return slot; @@ -332,36 +353,58 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; - /* HACK: conditionally 
disable junk filter in result relation */ - state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? - state->junkfilter : - NULL; + /* HACK: disable AFTER STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + + if (!TupIsNull(slot)) + { + /* We should've cached junk filter already */ + Assert(state->junkfilter); + + /* HACK: conditionally disable junk filter in result relation */ + state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? + state->junkfilter : + NULL; - /* Set saved_slot and yield */ - state->saved_slot = slot; + /* Don't forget to set saved_slot! */ + state->yielded_slot = slot; + } + + /* Yield */ + state->yielded = true; return NULL; } -/* Fetch next tuple (either fresh or stashed) */ +/* Fetch next tuple (either fresh or yielded) */ static TupleTableSlot * router_get_slot(PartitionRouterState *state, bool *should_process) { TupleTableSlot *slot; - if (!TupIsNull(state->saved_slot)) + /* Do we have a preserved slot? */ + if (state->yielded) { - /* Reset saved_slot */ - slot = state->saved_slot; - state->saved_slot = NULL; + /* HACK: enable AFTER STATEMENT triggers */ + MTEnableStmtTriggers(state->mt_state, state); + + /* Reset saved slot */ + slot = state->yielded_slot; + state->yielded_slot = NULL; + state->yielded = false; /* We shouldn't process preserved slot... */ *should_process = false; } else { + /* Fetch next tuple */ slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); + /* Restore operation type for AFTER STATEMENT triggers */ + if (TupIsNull(slot) && MTIsLastPlan(state->mt_state)) + slot = router_set_slot(state, NULL, CMD_UPDATE); + /* But we have to process non-empty slot */ *should_process = !TupIsNull(slot); } @@ -370,7 +413,7 @@ router_get_slot(PartitionRouterState *state, } static void -router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) +router_lazy_init_junkfilter(PartitionRouterState *state) { if (state->junkfilter == NULL) state->junkfilter = state->current_rri->ri_junkFilter; @@ -443,12 +486,12 @@ static TupleTableSlot * router_lock_or_delete_tuple(PartitionRouterState *state, TupleTableSlot *slot, ItemPointer tupleid, - bool *deleted, /* return value #1 */ - EState *estate) + bool *deleted /* return value #1 */) { ResultRelInfo *rri; Relation rel; + EState *estate = state->css.ss.ps.state; ExprContext *econtext = GetPerTupleExprContext(estate); ExprState *constraint = state->constraint; From 45e040aacc8fc719292d746fd52b34e7728e4c10 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Sep 2018 13:29:42 +0300 Subject: [PATCH 336/528] fix memory issues found by Valgrind (reset state after each subplan) --- src/partition_router.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index b746765e..55331bff 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -29,9 +29,6 @@ /* Highlight hacks with ModifyTable's fields */ #define MTHackField(mt_state, field) ( (mt_state)->field ) -/* Is current plan the last one? 
*/ -#define MTIsLastPlan(mt_state) ( (mt_state)->mt_whichplan == (mt_state)->mt_nplans - 1 ) - #define MTDisableStmtTriggers(mt_state, pr_state) \ do { \ @@ -402,7 +399,7 @@ router_get_slot(PartitionRouterState *state, slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); /* Restore operation type for AFTER STATEMENT triggers */ - if (TupIsNull(slot) && MTIsLastPlan(state->mt_state)) + if (TupIsNull(slot)) slot = router_set_slot(state, NULL, CMD_UPDATE); /* But we have to process non-empty slot */ From 431b316a08c7747f2193ed058e432fdaa2731421 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Sep 2018 13:52:45 +0300 Subject: [PATCH 337/528] PG 11: check moved rows in router_lock_or_delete_tuple() --- src/include/compat/pg_compat.h | 10 ++++++++++ src/partition_router.c | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 6f748eb1..1ae1b33b 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -529,6 +529,16 @@ char get_rel_persistence(Oid relid); #endif +/* + * ItemPointerIndicatesMovedPartitions() + * + * supported since v11, provide a stub for previous versions. + */ +#if PG_VERSION_NUM < 110000 +#define ItemPointerIndicatesMovedPartitions(ctid) ( false ) +#endif + + /* * make_restrictinfo() */ diff --git a/src/partition_router.c b/src/partition_router.c index 55331bff..7459315a 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -575,6 +575,10 @@ router_lock_or_delete_tuple(PartitionRouterState *state, ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be updated was already moved to another partition due to concurrent update"))); if (!ItemPointerEquals(tupleid, &hufd.ctid)) { From 1817d26fd099dc6249a635ee315d38e1d0891507 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 12 Sep 2018 16:34:49 +0300 Subject: [PATCH 338/528] Add more tests on dropped columns (issue #174) --- expected/pathman_dropped_cols.out | 126 ++++++++++++++++++++++++++++++ sql/pathman_dropped_cols.sql | 61 +++++++++++++++ 2 files changed, 187 insertions(+) diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 89585b52..7c9e2806 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -80,5 +80,131 @@ select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathm drop table test_hash cascade; NOTICE: drop cascades to 3 other objects +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES + (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; +ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; +CREATE EXTENSION pg_pathman; +ERROR: extension 
"pg_pathman" already exists +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); + create_hash_partitions +------------------------ + 3 +(1 row) + +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + set_enable_parent +------------------- + +(1 row) + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +-- errors usually start here +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXPLAIN EXECUTE getbyroot(2); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) (cost=4.17..11.28 rows=3 width=128) + Prune by: (root_dict.root_id = $1) + -> Bitmap Heap Scan on root_dict_0 root_dict (cost=4.17..11.28 rows=3 width=128) + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_0_root_id_idx (cost=0.00..4.17 rows=3 width=0) + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_1 root_dict (cost=4.17..11.28 rows=3 width=128) + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_1_root_id_idx (cost=0.00..4.17 rows=3 width=0) + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_2 root_dict (cost=4.17..11.28 rows=3 width=128) + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_2_root_id_idx (cost=0.00..4.17 rows=3 width=0) + Index Cond: (root_id = $1) +(14 rows) + +DROP TABLE root_dict CASCADE; +NOTICE: drop cascades to 3 other objects DROP SCHEMA dropped_cols CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index 32589c8c..6338d2f7 100644 --- 
a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -38,6 +38,67 @@ select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathm select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; drop table test_hash cascade; +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); + +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES + (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; + +ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; + +CREATE EXTENSION pg_pathman; +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; + +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); + +-- errors usually start here +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXPLAIN EXECUTE getbyroot(2); + +DROP TABLE root_dict CASCADE; DROP SCHEMA dropped_cols CASCADE; DROP EXTENSION pg_pathman; From 8918bd37f8fca14588d8aff72cd9bdba42029df4 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 12 Sep 2018 16:47:23 +0300 Subject: [PATCH 339/528] Make few fixes in tests added by 1817d26f --- expected/pathman_dropped_cols.out | 3 +-- sql/pathman_dropped_cols.sql | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 7c9e2806..4a4f3549 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -112,8 +112,6 @@ ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; ALTER TABLE root_dict DROP COLUMN dict_code, DROP COLUMN dict_name, DROP COLUMN sign; -CREATE EXTENSION pg_pathman; -ERROR: extension "pg_pathman" already exists SELECT create_hash_partitions('root_dict' :: REGCLASS, 'root_id', 3, @@ -204,6 +202,7 @@ EXPLAIN EXECUTE getbyroot(2); Index Cond: (root_id = $1) (14 rows) +DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; NOTICE: drop cascades to 3 other objects DROP SCHEMA dropped_cols CASCADE; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index 6338d2f7..a4d3c844 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -74,7 +74,6 @@ ALTER TABLE root_dict DROP COLUMN dict_code, DROP COLUMN dict_name, DROP COLUMN sign; -CREATE EXTENSION pg_pathman; SELECT create_hash_partitions('root_dict' :: REGCLASS, 'root_id', 3, @@ -99,6 +98,7 @@ EXECUTE getbyroot(2); EXECUTE getbyroot(2); EXPLAIN EXECUTE getbyroot(2); +DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; DROP SCHEMA dropped_cols CASCADE; DROP EXTENSION pg_pathman; From 4666195a96dbef63bc800736bf61943815c0b0dd Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 16:52:56 +0300 
Subject: [PATCH 340/528] Fix compilation for v10 and fix dropped_cols test, there is still segfault on update nodes --- expected/pathman_dropped_cols.out | 30 +++++++++++++++--------------- sql/pathman_dropped_cols.sql | 2 +- src/include/compat/pg_compat.h | 2 +- src/partition_router.c | 11 ++++++++++- 4 files changed, 27 insertions(+), 18 deletions(-) diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 4a4f3549..79e781b2 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -103,7 +103,7 @@ BEGIN LOOP FOR d IN 1..2 LOOP INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES - (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); END LOOP; END LOOP; END @@ -136,51 +136,51 @@ WHERE root_id = $1; EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) -- errors usually start here EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXPLAIN EXECUTE getbyroot(2); diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index a4d3c844..0ae16c8a 100644 --- 
a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -63,7 +63,7 @@ BEGIN LOOP FOR d IN 1..2 LOOP INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES - (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); END LOOP; END LOOP; END diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 1ae1b33b..fdb421ce 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -415,7 +415,7 @@ ExecCheck(ExprState *state, ExprContext *econtext) /* * extract_actual_join_clauses() */ -#if (PG_VERSION_NUM >= 100004) || \ +#if (PG_VERSION_NUM >= 100003) || \ (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) #define extract_actual_join_clauses_compat(restrictinfo_list, \ diff --git a/src/partition_router.c b/src/partition_router.c index 7459315a..efd3a382 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -170,11 +170,20 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) if (!changed_method) { + /* HACK: replace ModifyTable's execution method */ +#if PG_VERSION_NUM >= 110000 if (!mt_method) mt_method = state->ExecProcNodeReal; - /* HACK: replace ModifyTable's execution method */ ExecSetExecProcNode(state, router_run_modify_table); +#elif PG_VERSION_NUM >= 100000 + if (!mt_method) + mt_method = state->ExecProcNode; + + state->ExecProcNode = router_run_modify_table; +#else +#error "doesn't supported yet" +#endif changed_method = true; } From 94f621b47d4fd3f697c82d871d2a36d8ba674c14 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 17:15:48 +0300 Subject: [PATCH 341/528] Read parents list before it could lead to segfault --- src/include/compat/pg_compat.h | 2 +- src/relation_info.c | 29 ++++------------------------- 2 files changed, 5 insertions(+), 26 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 4228d264..b3abfcd2 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -322,7 +322,7 @@ static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RE /* * extract_actual_join_clauses() */ -#if (PG_VERSION_NUM >= 100004) || \ +#if (PG_VERSION_NUM >= 100003) || \ (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) #define extract_actual_join_clauses_compat(restrictinfo_list, \ diff --git a/src/relation_info.c b/src/relation_info.c index 1d191f1a..eacc491b 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -991,7 +991,6 @@ finish_delayed_invalidation(void) { Oid *parents = NULL; int parents_count = 0; - bool parents_fetched = false; ListCell *lc; AcceptInvalidationMessages(); @@ -1017,26 +1016,19 @@ finish_delayed_invalidation(void) /* Disregard all remaining invalidation jobs */ delayed_invalidation_whole_cache = false; - free_invalidation_lists(); - /* No need to continue, exit */ - return; + goto end; } } + parents = read_parent_oids(&parents_count); + /* We might be asked to perform a complete cache invalidation */ if (delayed_invalidation_whole_cache) { /* Unset 'invalidation_whole_cache' flag */ delayed_invalidation_whole_cache = false; - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = 
true; - } - /* Invalidate live entries and remove dead ones */ invalidate_pathman_relation_info_cache(parents, parents_count); } @@ -1050,13 +1042,6 @@ finish_delayed_invalidation(void) if (IsToastNamespace(get_rel_namespace(parent))) continue; - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - /* Check if parent still exists */ if (bsearch_oid(parent, parents, parents_count)) /* get_pathman_relation_info() will refresh this entry */ @@ -1074,13 +1059,6 @@ finish_delayed_invalidation(void) if (IsToastNamespace(get_rel_namespace(vague_rel))) continue; - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - /* It might be a partitioned table or a partition */ if (!try_invalidate_parent(vague_rel, parents, parents_count)) { @@ -1117,6 +1095,7 @@ finish_delayed_invalidation(void) } } +end: /* Finally, free invalidation jobs lists */ free_invalidation_lists(); From 0c69df3454dd39e02a200dc31e53bd80336d3c05 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 18:08:55 +0300 Subject: [PATCH 342/528] Fix hash join test on 10.5 --- META.json | 2 +- Makefile | 1 + expected/pathman_basic.out | 35 ---------------- expected/pathman_calamity.out | 2 +- expected/pathman_hashjoin.out | 73 +++++++++++++++++++++++++++++++++ expected/pathman_hashjoin_1.out | 73 +++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 12 ------ sql/pathman_hashjoin.sql | 44 ++++++++++++++++++++ src/include/init.h | 2 +- 9 files changed, 194 insertions(+), 50 deletions(-) create mode 100644 expected/pathman_hashjoin.out create mode 100644 expected/pathman_hashjoin_1.out create mode 100644 sql/pathman_hashjoin.sql diff --git a/META.json b/META.json index a198d696..447629a4 100644 --- a/META.json +++ b/META.json @@ -23,7 +23,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.13", + "version": "1.4.14", "abstract": "Partitioning tool" } }, diff --git a/Makefile b/Makefile index 8fdc0cde..42456b07 100644 --- a/Makefile +++ b/Makefile @@ -45,6 +45,7 @@ REGRESS = pathman_array_qual \ pathman_interval \ pathman_join_clause \ pathman_lateral \ + pathman_hashjoin \ pathman_mergejoin \ pathman_multilevel \ pathman_only \ diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fa946d72..c9bce988 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -810,41 +810,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
-> Index Scan using range_rel_2_dt_idx on range_rel_2 (4 rows) -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Hash Join - Hash Cond: (j3.id = j2.id) - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Hash - -> Hash Join - Hash Cond: (j2.id = j1.id) - -> Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 - -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 -(20 rows) - /* * Test inlined SQL functions */ diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 5cda7bc5..e1e65af0 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.13 + 1.4.14 (1 row) set client_min_messages = NOTICE; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out new file mode 100644 index 00000000..71ea1085 --- /dev/null +++ b/expected/pathman_hashjoin.out @@ -0,0 +1,73 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> 
Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 +(20 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out new file mode 100644 index 00000000..8e0007d4 --- /dev/null +++ b/expected/pathman_hashjoin_1.out @@ -0,0 +1,73 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Hash Join + Hash Cond: (j2.id = j1.id) + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 +(20 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index f24716c0..b7d460c4 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -215,18 +215,6 @@ SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY 
dt; -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - /* * Test inlined SQL functions */ diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql new file mode 100644 index 00000000..d3cc1b2b --- /dev/null +++ b/sql/pathman_hashjoin.sql @@ -0,0 +1,44 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; + +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/src/include/init.h b/src/include/init.h index 6bdccc2e..2227533e 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010413 +#define CURRENT_LIB_VERSION 0x010414 void *pathman_cache_search_relid(HTAB *cache_table, From d34a77e061963f4e14a0e8ec9f89e35fc3eb1e3c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 18:36:01 +0300 Subject: [PATCH 343/528] Bump version in META.json --- META.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/META.json b/META.json index 447629a4..a211fc36 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.13", + "version": "1.4.14", "maintainer": [ "Dmitry Ivanov ", "Ildus Kurbangaliev " From 7e76912ea3777aadb4851416d61102139b5ba81b Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 18 Sep 2018 16:28:24 +0300 Subject: [PATCH 344/528] Add support of pg11 --- src/include/compat/pg_compat.h | 25 +++++++++++++------------ src/partition_router.c | 23 ++++++++++++++--------- src/utility_stmt_hooking.c | 5 ++--- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index fdb421ce..145b2113 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -802,17 +802,6 @@ extern AttrNumber 
*convert_tuples_by_name_map(TupleDesc indesc, MakeTupleTableSlot() #endif -/* - * ExecInitExtraTupleSlot() - */ -#if PG_VERSION_NUM >= 110000 -#define ExecInitExtraTupleSlotCompat(estate) \ - ExecInitExtraTupleSlot((estate), NULL) -#else -#define ExecInitExtraTupleSlotCompat(estate) \ - ExecInitExtraTupleSlot(estate) -#endif - /* * BackgroundWorkerInitializeConnectionByOid() */ @@ -877,7 +866,6 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, find_childrel_appendrelinfo((root), (rel)) #endif - /* * HeapTupleGetXmin() * Vanilla PostgreSQL has HeaptTupleHeaderGetXmin, but for 64-bit xid @@ -895,6 +883,19 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * Common code * ------------- */ +static inline TupleTableSlot * +ExecInitExtraTupleSlotCompat(EState *s, TupleDesc t) +{ +#if PG_VERSION_NUM >= 110000 + return ExecInitExtraTupleSlot(s,t); +#else + TupleTableSlot *res = ExecInitExtraTupleSlot(s); + if (t) + ExecSetSlotDescriptor(res, t); + + return res; +#endif +} /* See ExecEvalParamExtern() */ static inline ParamExternData * diff --git a/src/partition_router.c b/src/partition_router.c index efd3a382..6f3a143b 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -171,15 +171,12 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) if (!changed_method) { /* HACK: replace ModifyTable's execution method */ -#if PG_VERSION_NUM >= 110000 if (!mt_method) mt_method = state->ExecProcNodeReal; +#if PG_VERSION_NUM >= 110000 ExecSetExecProcNode(state, router_run_modify_table); #elif PG_VERSION_NUM >= 100000 - if (!mt_method) - mt_method = state->ExecProcNode; - state->ExecProcNode = router_run_modify_table; #else #error "doesn't supported yet" @@ -316,7 +313,7 @@ router_run_modify_table(PlanState *state) mt_state = (ModifyTableState *) state; /* Get initial signal */ - mt_plans_old = MTHackField(mt_state, mt_nplans); + mt_plans_old = mt_state->mt_nplans; restart: /* Fetch next tuple */ @@ -359,21 +356,29 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; - /* HACK: disable AFTER STATEMENT triggers */ - MTDisableStmtTriggers(mt_state, state); - if (!TupIsNull(slot)) { /* We should've cached junk filter already */ Assert(state->junkfilter); + /* HACK: disable AFTER STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + + /* HACK: conditionally disable junk filter in result relation */ state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? state->junkfilter : NULL; /* Don't forget to set saved_slot! 
*/ - state->yielded_slot = slot; + state->yielded_slot = ExecInitExtraTupleSlotCompat(mt_state->ps.state, + slot->tts_tupleDescriptor); + ExecCopySlot(state->yielded_slot, slot); + } + else + { + /* HACK: enable AFTER STATEMENT triggers */ + MTEnableStmtTriggers(mt_state, state); } /* Yield */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index fcd6a1dc..c90a01da 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -514,10 +514,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, RPS_RRI_CB(finish_rri_for_copy, NULL)); /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlotCompat(estate); - ExecSetSlotDescriptor(myslot, tupDesc); + myslot = ExecInitExtraTupleSlotCompat(estate, NULL); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc); /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); From 0207c4e64b99470ec1c639726ff593d172235cf1 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 18 Sep 2018 18:28:00 +0300 Subject: [PATCH 345/528] Add overseer node (not working yet --- Makefile | 2 +- src/hooks.c | 74 ++++++----------- src/include/partition_overseer.h | 54 ++++++++++++ src/include/partition_router.h | 8 +- src/include/planner_tree_modification.h | 8 +- src/partition_overseer.c | 105 ++++++++++++++++++++++++ src/partition_router.c | 64 ++------------- src/pg_pathman.c | 4 +- src/planner_tree_modification.c | 46 ++++++----- 9 files changed, 225 insertions(+), 140 deletions(-) create mode 100644 src/include/partition_overseer.h create mode 100644 src/partition_overseer.c diff --git a/Makefile b/Makefile index f9567f94..7ba97cbd 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - $(WIN32RES) + src/partition_overseer.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include diff --git a/src/hooks.c b/src/hooks.c index 1ebb726b..b8c7a194 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -68,7 +68,6 @@ planner_hook_type pathman_planner_hook_next = NULL; post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next = NULL; shmem_startup_hook_type pathman_shmem_startup_hook_next = NULL; ProcessUtility_hook_type pathman_process_utility_hook_next = NULL; -ExecutorRun_hook_type pathman_executor_run_hook_next = NULL; /* Take care of joins */ @@ -616,6 +615,29 @@ pathman_enable_assign_hook(bool newval, void *extra) newval ? "enabled" : "disabled"); } +static void +execute_for_plantree(PlannedStmt *planned_stmt, + Plan *(*proc) (List *rtable, Plan *plan)) +{ + List *subplans = NIL; + ListCell *lc; + Plan *resplan = proc(planned_stmt->rtable, planned_stmt->planTree); + + if (resplan) + planned_stmt->planTree = resplan; + + foreach (lc, planned_stmt->subplans) + { + Plan *subplan = lfirst(lc); + resplan = proc(planned_stmt->rtable, (Plan *) lfirst(lc)); + if (resplan) + subplans = lappend(subplans, resplan); + else + subplans = lappend(subplans, subplan); + } + planned_stmt->subplans = subplans; +} + /* * Planner hook. 
It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from @@ -624,14 +646,6 @@ pathman_enable_assign_hook(bool newval, void *extra) PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { -#define ExecuteForPlanTree(planned_stmt, proc) \ - do { \ - ListCell *lc; \ - proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ - foreach (lc, (planned_stmt)->subplans) \ - proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ - } while (0) - PlannedStmt *result; uint32 query_id = parse->queryId; @@ -658,10 +672,10 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, add_partition_filters); + execute_for_plantree(result, add_partition_filters); /* Add PartitionRouter node for UPDATE queries */ - ExecuteForPlanTree(result, add_partition_routers); + execute_for_plantree(result, add_partition_routers); /* Decrement planner() calls count */ decr_planner_calls_count(); @@ -686,7 +700,6 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Finally return the Plan */ return result; -#undef ExecuteForPlanTree } /* @@ -950,40 +963,3 @@ pathman_process_utility_hook(Node *first_arg, context, params, queryEnv, dest, completionTag); } - -/* - * Executor hook (for PartitionRouter). - */ -#if PG_VERSION_NUM >= 100000 -void -pathman_executor_hook(QueryDesc *queryDesc, - ScanDirection direction, - ExecutorRun_CountArgType count, - bool execute_once) -#else -void -pathman_executor_hook(QueryDesc *queryDesc, - ScanDirection direction, - ExecutorRun_CountArgType count) -#endif -{ -#define EXECUTOR_HOOK pathman_executor_run_hook_next -#if PG_VERSION_NUM >= 100000 -#define EXECUTOR_HOOK_NEXT(q,d,c) EXECUTOR_HOOK((q),(d),(c), execute_once) -#define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c), execute_once) -#else -#define EXECUTOR_HOOK_NEXT(q,d,c) EXECUTOR_HOOK((q),(d),(c)) -#define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c)) -#endif - - /* Prepare ModifyTable nodes for PartitionRouter hackery */ - state_tree_visitor((PlanState *) queryDesc->planstate, - prepare_modify_table_for_partition_router, - NULL); - - /* Call hooks set by other extensions if needed */ - if (EXECUTOR_HOOK) - EXECUTOR_HOOK_NEXT(queryDesc, direction, count); - /* Else call internal implementation */ - else EXECUTOR_RUN(queryDesc, direction, count); -} diff --git a/src/include/partition_overseer.h b/src/include/partition_overseer.h new file mode 100644 index 00000000..ddf84c7a --- /dev/null +++ b/src/include/partition_overseer.h @@ -0,0 +1,54 @@ +/* ------------------------------------------------------------------------ + * + * partition_overseer.h + * Restart ModifyTable for unobvious reasons + * + * Copyright (c) 2018, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_OVERSEER_H +#define PARTITION_OVERSEER_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "access/tupconvert.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define OVERSEER_NODE_NAME "PartitionOverseer" + + +extern CustomScanMethods partition_overseer_plan_methods; +extern CustomExecMethods partition_overseer_exec_methods; + + +void init_partition_overseer_static_data(void); +Plan 
*make_partition_overseer(Plan *subplan); + +Node *partition_overseer_create_scan_state(CustomScan *node); + +void partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags); + +TupleTableSlot *partition_overseer_exec(CustomScanState *node); + +void partition_overseer_end(CustomScanState *node); + +void partition_overseer_rescan(CustomScanState *node); + +void partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + + +#endif /* PARTITION_OVERSEER_H */ diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 683af938..a07bde60 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -74,12 +74,7 @@ extern CustomExecMethods partition_router_exec_methods; void init_partition_router_static_data(void); - -Plan *make_partition_router(Plan *subplan, - Oid parent_relid, - Index parent_rti, - int epq_param, - List *returning_list); +Plan *make_partition_router(Plan *subplan, int epq_param); void prepare_modify_table_for_partition_router(PlanState *state, void *context); @@ -98,5 +93,6 @@ void partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es); +TupleTableSlot *partition_router_run_modify_table(PlanState *state); #endif /* PARTITION_UPDATE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index b93224ba..43f7a24b 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -25,8 +25,8 @@ void assign_query_id(Query *query); void reset_query_id_generator(void); /* Plan tree rewriting utility */ -void plan_tree_visitor(Plan *plan, - void (*visitor) (Plan *plan, void *context), +Plan * plan_tree_visitor(Plan *plan, + Plan *(*visitor) (Plan *plan, void *context), void *context); /* PlanState tree rewriting utility */ @@ -38,8 +38,8 @@ void state_tree_visitor(PlanState *state, void pathman_transform_query(Query *parse, ParamListInfo params); /* These functions scribble on Plan tree */ -void add_partition_filters(List *rtable, Plan *plan); -void add_partition_routers(List *rtable, Plan *plan); +Plan *add_partition_filters(List *rtable, Plan *plan); +Plan *add_partition_routers(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ diff --git a/src/partition_overseer.c b/src/partition_overseer.c new file mode 100644 index 00000000..52eea377 --- /dev/null +++ b/src/partition_overseer.c @@ -0,0 +1,105 @@ +#include "postgres.h" + +#include "partition_overseer.h" +#include "partition_filter.h" +#include "partition_router.h" + +CustomScanMethods partition_overseer_plan_methods; +CustomExecMethods partition_overseer_exec_methods; + +void +init_partition_overseer_static_data(void) +{ + partition_overseer_plan_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_plan_methods.CreateCustomScanState = partition_overseer_create_scan_state; + + partition_overseer_exec_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_exec_methods.BeginCustomScan = partition_overseer_begin; + partition_overseer_exec_methods.ExecCustomScan = partition_overseer_exec; + partition_overseer_exec_methods.EndCustomScan = partition_overseer_end; + partition_overseer_exec_methods.ReScanCustomScan = partition_overseer_rescan; + partition_overseer_exec_methods.MarkPosCustomScan = NULL; + partition_overseer_exec_methods.RestrPosCustomScan = NULL; + partition_overseer_exec_methods.ExplainCustomScan = partition_overseer_explain; + + 
RegisterCustomScanMethods(&partition_overseer_plan_methods); +} + +Plan * +make_partition_overseer(Plan *subplan) +{ + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods, child plan and param number for EPQ */ + cscan->methods = &partition_overseer_plan_methods; + cscan->custom_plans = list_make1(subplan); + cscan->custom_private = NIL; + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); + cscan->custom_scan_tlist = subplan->targetlist; + + return &cscan->scan.plan; +} + + +Node * +partition_overseer_create_scan_state(CustomScan *node) +{ + CustomScanState *state = palloc0(sizeof(CustomScanState)); + NodeSetTag(state, T_CustomScanState); + + state->flags = node->flags; + state->methods = &partition_overseer_exec_methods; + + return (Node *) state; +} + +void +partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags) +{ + CustomScan *css = (CustomScan *) node->ss.ps.plan; + Plan *plan = linitial(css->custom_plans); + + /* It's convenient to store PlanState in 'custom_ps' */ + node->custom_ps = list_make1(ExecInitNode(plan, estate, eflags)); +} + +TupleTableSlot * +partition_overseer_exec(CustomScanState *node) +{ + PlanState *state = linitial(node->custom_ps); + return partition_router_run_modify_table(state); +} + +void +partition_overseer_end(CustomScanState *node) +{ + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); +} + +void +partition_overseer_rescan(CustomScanState *node) +{ + elog(ERROR, "partition_overseer_rescan is not implemented"); +} + +void +partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) +{ + /* nothing to do */ +} diff --git a/src/partition_router.c b/src/partition_router.c index 6f3a143b..53349730 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -61,13 +61,6 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; - -/* FIXME: replace this magic with a CustomScan */ -static ExecProcNodeMtd mt_method = NULL; - - -static TupleTableSlot *router_run_modify_table(PlanState *state); - static TupleTableSlot *router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, CmdType operation); @@ -115,12 +108,7 @@ init_partition_router_static_data(void) } Plan * -make_partition_router(Plan *subplan, - Oid parent_relid, - Index parent_rti, - int epq_param, - List *returning_list) - +make_partition_router(Plan *subplan, int epq_param) { CustomScan *cscan = makeNode(CustomScan); @@ -147,49 +135,6 @@ make_partition_router(Plan *subplan, return &cscan->scan.plan; } -void -prepare_modify_table_for_partition_router(PlanState *state, void *context) -{ - if (IsA(state, ModifyTableState)) - { - ModifyTableState *mt_state = (ModifyTableState *) state; - bool changed_method = false; - int i; - - for (i = 0; i < mt_state->mt_nplans; i++) - { - CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; - PartitionRouterState *pr_state; - - /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pf_state) && - IsPartitionRouterState(pr_state = 
linitial(pf_state->custom_ps))) - { - /* HACK: point to ModifyTable in PartitionRouter */ - pr_state->mt_state = mt_state; - - if (!changed_method) - { - /* HACK: replace ModifyTable's execution method */ - if (!mt_method) - mt_method = state->ExecProcNodeReal; - -#if PG_VERSION_NUM >= 110000 - ExecSetExecProcNode(state, router_run_modify_table); -#elif PG_VERSION_NUM >= 100000 - state->ExecProcNode = router_run_modify_table; -#else -#error "doesn't supported yet" -#endif - - changed_method = true; - } - } - } - } -} - - Node * partition_router_create_scan_state(CustomScan *node) { @@ -198,6 +143,7 @@ partition_router_create_scan_state(CustomScan *node) state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); NodeSetTag(state, T_CustomScanState); + state = (PartitionRouterState *) makeNode(CustomScanState); state->css.flags = node->flags; state->css.methods = &partition_router_exec_methods; @@ -302,8 +248,8 @@ partition_router_explain(CustomScanState *node, /* Smart wrapper over ModifyTable */ -static TupleTableSlot * -router_run_modify_table(PlanState *state) +TupleTableSlot * +partition_router_run_modify_table(PlanState *state) { ModifyTableState *mt_state; TupleTableSlot *slot; @@ -317,7 +263,7 @@ router_run_modify_table(PlanState *state) restart: /* Fetch next tuple */ - slot = mt_method(state); + slot = ExecProcNode(state); /* Get current signal */ mt_plans_new = MTHackField(mt_state, mt_nplans); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 69497f92..1b65a832 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -17,6 +17,7 @@ #include "pathman.h" #include "partition_filter.h" #include "partition_router.h" +#include "partition_overseer.h" #include "planner_tree_modification.h" #include "runtime_append.h" #include "runtime_merge_append.h" @@ -317,8 +318,6 @@ _PG_init(void) planner_hook = pathman_planner_hook; pathman_process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; - pathman_executor_run_hook_next = ExecutorRun_hook; - ExecutorRun_hook = pathman_executor_hook; /* Initialize static data for all subsystems */ init_main_pathman_toggles(); @@ -327,6 +326,7 @@ _PG_init(void) init_runtime_merge_append_static_data(); init_partition_filter_static_data(); init_partition_router_static_data(); + init_partition_overseer_static_data(); } /* Get cached PATHMAN_CONFIG relation Oid */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 071c179f..6b453256 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -14,6 +14,7 @@ #include "partition_filter.h" #include "partition_router.h" +#include "partition_overseer.h" #include "planner_tree_modification.h" #include "relation_info.h" #include "rewrite/rewriteManip.h" @@ -110,8 +111,8 @@ static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); static void handle_modification_query(Query *parse, transform_query_cxt *context); -static void partition_filter_visitor(Plan *plan, void *context); -static void partition_router_visitor(Plan *plan, void *context); +static Plan *partition_filter_visitor(Plan *plan, void *context); +static Plan *partition_router_visitor(Plan *plan, void *context); static void state_visit_subplans(List *plans, void (*visitor) (), void *context); static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (), void *context); @@ -154,15 +155,15 @@ 
reset_query_id_generator(void) * * 'visitor' is applied right before return. */ -void +Plan * plan_tree_visitor(Plan *plan, - void (*visitor) (Plan *plan, void *context), + Plan *(*visitor) (Plan *plan, void *context), void *context) { ListCell *l; if (plan == NULL) - return; + return NULL; check_stack_depth(); @@ -211,7 +212,7 @@ plan_tree_visitor(Plan *plan, plan_tree_visitor(plan->righttree, visitor, context); /* Apply visitor to the current node */ - visitor(plan, context); + return visitor(plan, context); } void @@ -687,19 +688,23 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) */ /* Add PartitionFilter nodes to the plan tree */ -void +Plan * add_partition_filters(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_filter) - plan_tree_visitor(plan, partition_filter_visitor, rtable); + return plan_tree_visitor(plan, partition_filter_visitor, rtable); + + return NULL; } /* Add PartitionRouter nodes to the plan tree */ -void +Plan * add_partition_routers(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_router) - plan_tree_visitor(plan, partition_router_visitor, rtable); + return plan_tree_visitor(plan, partition_router_visitor, rtable); + + return NULL; } /* @@ -707,7 +712,7 @@ add_partition_routers(List *rtable, Plan *plan) * * 'context' should point to the PlannedStmt->rtable. */ -static void +static Plan * partition_filter_visitor(Plan *plan, void *context) { List *rtable = (List *) context; @@ -718,7 +723,7 @@ partition_filter_visitor(Plan *plan, void *context) /* Skip if not ModifyTable with 'INSERT' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) - return; + return NULL; Assert(rtable && IsA(rtable, List)); @@ -748,6 +753,8 @@ partition_filter_visitor(Plan *plan, void *context) returning_list); } } + + return NULL; } /* @@ -755,7 +762,7 @@ partition_filter_visitor(Plan *plan, void *context) * * 'context' should point to the PlannedStmt->rtable. 
*/ -static void +static Plan * partition_router_visitor(Plan *plan, void *context) { List *rtable = (List *) context; @@ -766,15 +773,16 @@ partition_router_visitor(Plan *plan, void *context) /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) - return; + return NULL; Assert(rtable && IsA(rtable, List)); if (modifytable_contains_fdw(rtable, modify_table)) { - ereport(ERROR, + ereport(WARNING, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg(UPDATE_NODE_NAME " does not support foreign data wrappers"))); + return NULL; } lc3 = list_head(modify_table->returningLists); @@ -803,10 +811,8 @@ partition_router_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - prouter = make_partition_router((Plan *) lfirst(lc1), relid, - modify_table->nominalRelation, - modify_table->epqParam, - returning_list); + prouter = make_partition_router((Plan *) lfirst(lc1), + modify_table->epqParam); pfilter = make_partition_filter((Plan *) prouter, relid, modify_table->nominalRelation, @@ -817,6 +823,8 @@ partition_router_visitor(Plan *plan, void *context) lfirst(lc1) = pfilter; } } + + return make_partition_overseer(plan); } From f18aa524276ea3a960afc1412cdbc326cd097e7c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 18 Sep 2018 19:57:09 +0300 Subject: [PATCH 346/528] Fix updating using Overseer node (still has errors) --- expected/pathman_update_node.out | 42 ++++++++++---------- src/include/partition_router.h | 30 ++++---------- src/partition_overseer.c | 68 ++++++++++++++++++++++++++++++-- src/partition_router.c | 45 --------------------- src/planner_tree_modification.c | 17 +++++--- 5 files changed, 104 insertions(+), 98 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 4f379e05..120b42c4 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -15,29 +15,31 @@ SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); /* Moving from 2st to 1st partition */ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; - QUERY PLAN -------------------------------------------------------------------- - Update on test_range_2 - -> Custom Scan (PartitionFilter) - -> Custom Scan (PartitionRouter) - -> Bitmap Heap Scan on test_range_2 - Recheck Cond: (val = '15'::numeric) - -> Bitmap Index Scan on test_range_2_val_idx - Index Cond: (val = '15'::numeric) -(7 rows) + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) /* Keep same partition */ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; - QUERY PLAN -------------------------------------------------------------------- - Update on test_range_2 - -> Custom Scan (PartitionFilter) - -> Custom Scan (PartitionRouter) - -> Bitmap Heap Scan on test_range_2 - Recheck Cond: (val = '15'::numeric) - -> Bitmap Index Scan on test_range_2_val_idx - Index Cond: (val = '15'::numeric) -(7 rows) + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) 
+ -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) /* Update values in 1st partition (rows remain there) */ UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; diff --git a/src/include/partition_router.h b/src/include/partition_router.h index a07bde60..8240d13b 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -56,43 +56,27 @@ extern CustomScanMethods partition_router_plan_methods; extern CustomExecMethods partition_router_exec_methods; -#define IsPartitionRouterPlan(node) \ - ( \ - IsA((node), CustomScan) && \ - (((CustomScan *) (node))->methods == &partition_router_plan_methods) \ - ) - #define IsPartitionRouterState(node) \ ( \ IsA((node), CustomScanState) && \ (((CustomScanState *) (node))->methods == &partition_router_exec_methods) \ ) -#define IsPartitionRouter(node) \ - ( IsPartitionRouterPlan(node) || IsPartitionRouterState(node) ) - +/* Highlight hacks with ModifyTable's fields */ +#define MTHackField(mt_state, field) ( (mt_state)->field ) void init_partition_router_static_data(void); - -Plan *make_partition_router(Plan *subplan, int epq_param); - -void prepare_modify_table_for_partition_router(PlanState *state, void *context); - - -Node *partition_router_create_scan_state(CustomScan *node); - +void prepare_modify_table_for_partition_router(PlanState *state, + void *context); void partition_router_begin(CustomScanState *node, EState *estate, int eflags); - -TupleTableSlot *partition_router_exec(CustomScanState *node); - void partition_router_end(CustomScanState *node); - void partition_router_rescan(CustomScanState *node); - void partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es); -TupleTableSlot *partition_router_run_modify_table(PlanState *state); +Plan *make_partition_router(Plan *subplan, int epq_param); +Node *partition_router_create_scan_state(CustomScan *node); +TupleTableSlot *partition_router_exec(CustomScanState *node); #endif /* PARTITION_UPDATE_H */ diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 52eea377..5178150d 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -1,8 +1,9 @@ #include "postgres.h" -#include "partition_overseer.h" #include "partition_filter.h" +#include "partition_overseer.h" #include "partition_router.h" +#include "planner_tree_modification.h" CustomScanMethods partition_overseer_plan_methods; CustomExecMethods partition_overseer_exec_methods; @@ -64,6 +65,30 @@ partition_overseer_create_scan_state(CustomScan *node) return (Node *) state; } +static void +set_mt_state_for_router(PlanState *state, void *context) +{ + if (IsA(state, ModifyTableState)) + { + ModifyTableState *mt_state = (ModifyTableState *) state; + int i; + + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; + PartitionRouterState *pr_state; + + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pf_state) && + IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = mt_state; + } + } + } +} + void partition_overseer_begin(CustomScanState *node, EState *estate, @@ -74,13 +99,48 @@ partition_overseer_begin(CustomScanState *node, /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(plan, estate, eflags)); + + /* 
Save ModifyTableState in PartitionRouterState structs */ + state_tree_visitor((PlanState *) linitial(node->custom_ps), + set_mt_state_for_router, + NULL); } TupleTableSlot * partition_overseer_exec(CustomScanState *node) { - PlanState *state = linitial(node->custom_ps); - return partition_router_run_modify_table(state); + ModifyTableState *mt_state = linitial(node->custom_ps); + + TupleTableSlot *slot; + int mt_plans_old, + mt_plans_new; + + /* Get initial signal */ + mt_plans_old = mt_state->mt_nplans; + +restart: + /* Fetch next tuple */ + slot = ExecProcNode((PlanState *) mt_state); + + /* Get current signal */ + mt_plans_new = MTHackField(mt_state, mt_nplans); + + /* Did PartitionRouter ask us to restart? */ + if (mt_plans_new != mt_plans_old) + { + /* Signal points to current plan */ + int state_idx = -mt_plans_new; + + /* HACK: partially restore ModifyTable's state */ + MTHackField(mt_state, mt_done) = false; + MTHackField(mt_state, mt_nplans) = mt_plans_old; + MTHackField(mt_state, mt_whichplan) = state_idx; + + /* Restart ModifyTable */ + goto restart; + } + + return slot; } void @@ -101,5 +161,5 @@ partition_overseer_explain(CustomScanState *node, List *ancestors, ExplainState *es) { - /* nothing to do */ + /* Nothing to do here now */ } diff --git a/src/partition_router.c b/src/partition_router.c index 53349730..3ac1ece6 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -26,10 +26,6 @@ #include "utils/rel.h" -/* Highlight hacks with ModifyTable's fields */ -#define MTHackField(mt_state, field) ( (mt_state)->field ) - - #define MTDisableStmtTriggers(mt_state, pr_state) \ do { \ TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ @@ -143,7 +139,6 @@ partition_router_create_scan_state(CustomScan *node) state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); NodeSetTag(state, T_CustomScanState); - state = (PartitionRouterState *) makeNode(CustomScanState); state->css.flags = node->flags; state->css.methods = &partition_router_exec_methods; @@ -246,46 +241,6 @@ partition_router_explain(CustomScanState *node, /* Nothing to do here now */ } - -/* Smart wrapper over ModifyTable */ -TupleTableSlot * -partition_router_run_modify_table(PlanState *state) -{ - ModifyTableState *mt_state; - TupleTableSlot *slot; - int mt_plans_old, - mt_plans_new; - - mt_state = (ModifyTableState *) state; - - /* Get initial signal */ - mt_plans_old = mt_state->mt_nplans; - -restart: - /* Fetch next tuple */ - slot = ExecProcNode(state); - - /* Get current signal */ - mt_plans_new = MTHackField(mt_state, mt_nplans); - - /* Did PartitionRouter ask us to restart? 
*/ - if (mt_plans_new != mt_plans_old) - { - /* Signal points to current plan */ - int state_idx = -mt_plans_new; - - /* HACK: partially restore ModifyTable's state */ - MTHackField(mt_state, mt_done) = false; - MTHackField(mt_state, mt_nplans) = mt_plans_old; - MTHackField(mt_state, mt_whichplan) = state_idx; - - /* Restart ModifyTable */ - goto restart; - } - - return slot; -} - /* Return tuple OR yield it and change ModifyTable's operation */ static TupleTableSlot * router_set_slot(PartitionRouterState *state, diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 6b453256..a3b06873 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -765,11 +765,12 @@ partition_filter_visitor(Plan *plan, void *context) static Plan * partition_router_visitor(Plan *plan, void *context) { - List *rtable = (List *) context; - ModifyTable *modify_table = (ModifyTable *) plan; - ListCell *lc1, - *lc2, - *lc3; + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; + ListCell *lc1, + *lc2, + *lc3; + bool changed = false; /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) @@ -821,10 +822,14 @@ partition_router_visitor(Plan *plan, void *context) returning_list); lfirst(lc1) = pfilter; + changed = true; } } - return make_partition_overseer(plan); + if (changed) + return make_partition_overseer(plan); + + return NULL; } From 9d7980aebfc4767a0ada9ae8fb5835bf7fab1481 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 13:19:16 +0300 Subject: [PATCH 347/528] Fix partition router running --- src/partition_overseer.c | 41 ++++++++++++++++++++-------------------- src/partition_router.c | 25 ++++++------------------ 2 files changed, 26 insertions(+), 40 deletions(-) diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 5178150d..2456f6aa 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -68,25 +68,24 @@ partition_overseer_create_scan_state(CustomScan *node) static void set_mt_state_for_router(PlanState *state, void *context) { - if (IsA(state, ModifyTableState)) - { - ModifyTableState *mt_state = (ModifyTableState *) state; - int i; - - for (i = 0; i < mt_state->mt_nplans; i++) - { - CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; - PartitionRouterState *pr_state; - - /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pf_state) && - IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) - { - /* HACK: point to ModifyTable in PartitionRouter */ - pr_state->mt_state = mt_state; - } - } - } + ModifyTableState *mt_state = (ModifyTableState *) state; + + if (!IsA(state, ModifyTableState)) + return; + + for (int i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; + PartitionRouterState *pr_state; + + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pf_state) && + IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = mt_state; + } + } } void @@ -119,7 +118,7 @@ partition_overseer_exec(CustomScanState *node) mt_plans_old = mt_state->mt_nplans; restart: - /* Fetch next tuple */ + /* Run ModifyTable */ slot = ExecProcNode((PlanState *) mt_state); /* Get current signal */ @@ -136,7 +135,7 @@ 
partition_overseer_exec(CustomScanState *node) MTHackField(mt_state, mt_nplans) = mt_plans_old; MTHackField(mt_state, mt_whichplan) = state_idx; - /* Restart ModifyTable */ + /* Rerun ModifyTable */ goto restart; } diff --git a/src/partition_router.c b/src/partition_router.c index 3ac1ece6..82578c5d 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -63,7 +63,6 @@ static TupleTableSlot *router_set_slot(PartitionRouterState *state, static TupleTableSlot *router_get_slot(PartitionRouterState *state, bool *should_process); -static void router_lazy_init_junkfilter(PartitionRouterState *state); static void router_lazy_init_constraint(PartitionRouterState *state); static ItemPointerData router_extract_ctid(PartitionRouterState *state, @@ -185,8 +184,9 @@ partition_router_exec(CustomScanState *node) ItemPointerSetInvalid(&ctid); - /* Build new junkfilter lazily */ - router_lazy_init_junkfilter(state); + /* Build new junkfilter if needed */ + if (state->junkfilter == NULL) + state->junkfilter = state->current_rri->ri_junkFilter; /* Build recheck constraint state lazily */ router_lazy_init_constraint(state); @@ -257,15 +257,14 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; + /* HACK: disable AFTER STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + if (!TupIsNull(slot)) { /* We should've cached junk filter already */ Assert(state->junkfilter); - /* HACK: disable AFTER STATEMENT triggers */ - MTDisableStmtTriggers(mt_state, state); - - /* HACK: conditionally disable junk filter in result relation */ state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? state->junkfilter : @@ -276,11 +275,6 @@ router_set_slot(PartitionRouterState *state, slot->tts_tupleDescriptor); ExecCopySlot(state->yielded_slot, slot); } - else - { - /* HACK: enable AFTER STATEMENT triggers */ - MTEnableStmtTriggers(mt_state, state); - } /* Yield */ state->yielded = true; @@ -324,13 +318,6 @@ router_get_slot(PartitionRouterState *state, return slot; } -static void -router_lazy_init_junkfilter(PartitionRouterState *state) -{ - if (state->junkfilter == NULL) - state->junkfilter = state->current_rri->ri_junkFilter; -} - static void router_lazy_init_constraint(PartitionRouterState *state) { From 5ab36a6b61adc8b49ce858775418e9cebed9474b Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 16:02:00 +0300 Subject: [PATCH 348/528] Fix python tests --- tests/python/partitioning_test.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index f2b2ea51..cb1282c6 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1022,6 +1022,12 @@ def test_update_node_plan1(self): plan = con.execute('SELECT query_plan(\'%s\')' % test_query)[0][0] plan = plan[0]["Plan"] + # PartitionOverseer + self.assertEqual(plan["Node Type"], "Custom Scan") + self.assertEqual(plan["Custom Plan Provider"], 'PartitionOverseer') + + # ModifyTable + plan = plan["Plans"][0] self.assertEqual(plan["Node Type"], "ModifyTable") self.assertEqual(plan["Operation"], "Update") self.assertEqual(plan["Relation Name"], "test_range") From 60268e8e712e6c56c6d3efa080ca45305f35e730 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 17:31:21 +0300 Subject: [PATCH 349/528] Return get_pathman_lib_version as deprecated function --- init.sql | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/init.sql b/init.sql index 12546cca..6fd6a0c7 100644 --- a/init.sql +++ b/init.sql @@ -847,3 +847,8 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; + +-- deprecated +CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' +LANGUAGE C STRICT; From 77ab2c4283ff543525b9556e12670494cd312e4b Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 17:45:19 +0300 Subject: [PATCH 350/528] Add first revision of migration file --- pg_pathman--1.4--1.5.sql | 833 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 833 insertions(+) create mode 100644 pg_pathman--1.4--1.5.sql diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql new file mode 100644 index 00000000..8b02dcf4 --- /dev/null +++ b/pg_pathman--1.4--1.5.sql @@ -0,0 +1,833 @@ +/* + * Drop triggers + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + triggername TEXT; + relation OID; + +BEGIN + triggername := concat(parent_relid::text, '_upd_trig'); + + /* Drop trigger for each partition if exists */ + FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid + FROM pg_catalog.pg_inherits + JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid + WHERE inhparent = parent_relid AND tgname = triggername) + LOOP + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + relation::REGCLASS); + END LOOP; + + /* Drop trigger on parent */ + IF EXISTS (SELECT * FROM pg_catalog.pg_trigger + WHERE tgname = triggername AND tgrelid = parent_relid) + THEN + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + parent_relid::TEXT); + END IF; +END +$$ LANGUAGE plpgsql STRICT; + +DO $$ +DECLARE r record; +BEGIN + FOR r IN SELECT parent_relid FROM @extschema@.pathman_config + LOOP + PERFORM @extschema@.drop_triggers(r.parent_relid); + + END LOOP; +END$$; + +/* + * Add new partition + */ +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* Check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition */ + part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Append new partition. 
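+ *
+ * An illustrative call (the table name here is hypothetical; it assumes a
+ * parent that is already RANGE-partitioned):
+ *
+ *     SELECT append_range_partition('my_range_table');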
+ */ +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Attach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS $$ +DECLARE + part_expr TEXT; + part_type INTEGER; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; + END IF; + + /* Check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); + + IF part_expr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid), + @extschema@.build_range_condition(partition_relid, + part_expr, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + /* Invoke an initialization callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Create a naming sequence for partitioned 
table. + */ +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Creates RANGE partitions for specified relation based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on numerical expression + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END 
IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF max_value IS NULL THEN + RAISE EXCEPTION 'expression "%" can return NULL values', expression; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on bounds array + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + part_count INTEGER := 0; + +BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + bounds[1], + bounds[array_length(bounds, 1)]); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ +LANGUAGE plpgsql; + +/* + * Detach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + 
+BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid)); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Disable pathman partitioning for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS $$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; +END +$$ LANGUAGE plpgsql STRICT; + +/* + * Drop a naming sequence for partitioned table. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS $$ +DECLARE + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; + END IF; + + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); + + FOR child IN (SELECT inhrelid::REGCLASS + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', rows_count, child; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = child + INTO rel_kind; + + /* + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF rel_kind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', child); + ELSE + EXECUTE format('DROP TABLE %s', child); + END IF; + + part_count := part_count + 1; + END LOOP; + + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + RETURN part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +/* + * Drop range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + part_type INTEGER; + v_relkind CHAR; + v_rows BIGINT; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + END IF; + + RETURN part_name; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +-- deprecated +CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' +LANGUAGE C STRICT; + +/* + * Get number of partitions managed by pg_pathman. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key type. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' +LANGUAGE C STRICT; + +/* + * Get partitioning type. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Merge RANGE partitions. 
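+ *
+ * The replacement below is variadic, so any number of partitions can be
+ * merged in a single call. An illustrative call (partition names are
+ * hypothetical):
+ *
+ *     SELECT merge_range_partitions('my_part_1', 'my_part_2');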
+ */ +DROP FUNCTION public.merge_range_partitions(regclass[]); +DROP FUNCTION public.merge_range_partitions(regclass, regclass); + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + +/* + * Prepend new partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Show all existing concurrent partitioning tasks. + */ +CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +RETURNS TABLE ( + userid REGROLE, + pid INT, + dbid OID, + relid REGCLASS, + processed INT8, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; + +/* + * Split RANGE partition in two using a pivot. 
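+ *
+ * The pivot value becomes the boundary between the two resulting
+ * partitions. An illustrative call (the partition name is hypothetical):
+ *
+ *     SELECT split_range_partition('my_part_1', 500);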
+ */ +DROP FUNCTION public.split_range_partition(regclass, anyelement, text, text, OUT anyarray); +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +ALTER TABLE public.pathman_concurrent_part_tasks + ALTER COLUMN processed SET TYPE bigint; + +DROP FUNCTION @extschema@.build_update_trigger_func_name(regclass); +DROP FUNCTION @extschema@.build_update_trigger_name(regclass); +DROP FUNCTION @extschema@.create_single_update_trigger(regclass, regclass); +DROP FUNCTION @extschema@.create_update_triggers(regclass); +DROP FUNCTION @extschema@.drop_triggers(regclass); +DROP FUNCTION @extschema@.has_update_trigger(regclass); +DROP FUNCTION @extschema@.pathman_update_trigger_func(); From 4f932ab2c734a72e5021107609fd5a2914546b7c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 18:40:20 +0300 Subject: [PATCH 351/528] Fix migration script --- Makefile | 3 +- pg_pathman--1.4--1.5.sql | 68 +++++++++------------------------------- 2 files changed, 17 insertions(+), 54 deletions(-) diff --git a/Makefile b/Makefile index 7ba97cbd..efd0cbc5 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,8 @@ DATA_built = pg_pathman--$(EXTVERSION).sql DATA = pg_pathman--1.0--1.1.sql \ pg_pathman--1.1--1.2.sql \ pg_pathman--1.2--1.3.sql \ - pg_pathman--1.3--1.4.sql + pg_pathman--1.3--1.4.sql \ + pg_pathman--1.4--1.5.sql PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index 8b02dcf4..cdda146b 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -1,47 +1,3 @@ -/* - * Drop triggers - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) -RETURNS VOID AS $$ -DECLARE - triggername TEXT; - relation OID; - -BEGIN - triggername := concat(parent_relid::text, '_upd_trig'); - - /* Drop trigger for each partition if exists */ - FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid - FROM pg_catalog.pg_inherits - JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid - WHERE inhparent = parent_relid AND tgname = triggername) - LOOP - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - relation::REGCLASS); - END LOOP; - - /* Drop trigger on parent */ - IF EXISTS (SELECT * FROM pg_catalog.pg_trigger - WHERE tgname = triggername AND tgrelid = parent_relid) - THEN - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - parent_relid::TEXT); - END IF; -END -$$ LANGUAGE plpgsql STRICT; - -DO $$ -DECLARE r record; -BEGIN - FOR r IN SELECT parent_relid FROM @extschema@.pathman_config - LOOP - PERFORM @extschema@.drop_triggers(r.parent_relid); + - END LOOP; -END$$; - /* * Add new partition */ @@ -685,8 +641,7 @@ END $$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ --- deprecated -CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() +CREATE FUNCTION @extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; @@ -706,7 +661,8 @@ LANGUAGE sql STRICT; /* * Get partitioning key. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( +DROP FUNCTION @extschema@.get_partition_key(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key( parent_relid REGCLASS) RETURNS TEXT AS $$ @@ -719,7 +675,8 @@ LANGUAGE sql STRICT; /* * Get partitioning key type. 
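  *
  * An illustrative call (the table name is hypothetical):
  *
  *   SELECT get_partition_key_type('my_table'::REGCLASS);  -- e.g. integer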
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( +DROP FUNCTION @extschema@.get_partition_key_type(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key_type( parent_relid REGCLASS) RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; @@ -727,6 +684,7 @@ LANGUAGE C STRICT; /* * Get partitioning type. */ +DROP FUNCTION @extschema@.get_partition_type(REGCLASS); CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( parent_relid REGCLASS) RETURNS INT4 AS @@ -798,7 +756,9 @@ $$ LANGUAGE plpgsql; /* * Show all existing concurrent partitioning tasks. */ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +DROP VIEW @extschema@.pathman_concurrent_part_tasks; +DROP FUNCTION @extschema@.show_concurrent_part_tasks(); +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() RETURNS TABLE ( userid REGROLE, pid INT, @@ -809,6 +769,10 @@ RETURNS TABLE ( AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; +CREATE VIEW @extschema@.pathman_concurrent_part_tasks +AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + /* * Split RANGE partition in two using a pivot. */ @@ -821,13 +785,11 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' LANGUAGE C; -ALTER TABLE public.pathman_concurrent_part_tasks - ALTER COLUMN processed SET TYPE bigint; - DROP FUNCTION @extschema@.build_update_trigger_func_name(regclass); DROP FUNCTION @extschema@.build_update_trigger_name(regclass); DROP FUNCTION @extschema@.create_single_update_trigger(regclass, regclass); DROP FUNCTION @extschema@.create_update_triggers(regclass); DROP FUNCTION @extschema@.drop_triggers(regclass); DROP FUNCTION @extschema@.has_update_trigger(regclass); -DROP FUNCTION @extschema@.pathman_update_trigger_func(); +DROP FUNCTION @extschema@.pathman_update_trigger_func() CASCADE; +DROP FUNCTION @extschema@.get_pathman_lib_version(); From fc4463e77ea8b809409d6815aad24615b42df4ab Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 12:36:58 +0300 Subject: [PATCH 352/528] Remove get_pathman_lib_version --- init.sql | 5 ----- 1 file changed, 5 deletions(-) diff --git a/init.sql b/init.sql index 6fd6a0c7..12546cca 100644 --- a/init.sql +++ b/init.sql @@ -847,8 +847,3 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; - --- deprecated -CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() -RETURNS CSTRING AS 'pg_pathman', 'pathman_version' -LANGUAGE C STRICT; From b5f7633b99dcc8a3d8cc596e575f377b547220f5 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 14:08:00 +0300 Subject: [PATCH 353/528] Fix migration script --- pg_pathman--1.4--1.5.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index cdda146b..fe29a586 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -698,8 +698,8 @@ LANGUAGE sql STRICT; /* * Merge RANGE partitions. 
*/ -DROP FUNCTION public.merge_range_partitions(regclass[]); -DROP FUNCTION public.merge_range_partitions(regclass, regclass); +DROP FUNCTION @extschema@.merge_range_partitions(regclass[]); +DROP FUNCTION @extschema@.merge_range_partitions(regclass, regclass); CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( variadic partitions REGCLASS[]) @@ -776,7 +776,7 @@ GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; /* * Split RANGE partition in two using a pivot. */ -DROP FUNCTION public.split_range_partition(regclass, anyelement, text, text, OUT anyarray); +DROP FUNCTION @extschema@.split_range_partition(regclass, anyelement, text, text, OUT anyarray); CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( partition_relid REGCLASS, split_value ANYELEMENT, From e9041c78028cff641ccc26dc75651e27bde22087 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 16:35:47 +0300 Subject: [PATCH 354/528] Remove cooked_expr column from pathman_config --- expected/pathman_basic.out | 16 ++++----- expected/pathman_calamity.out | 18 +++++----- expected/pathman_column_type.out | 35 ++++++++++++------- expected/pathman_permissions.out | 6 ++-- init.sql | 15 +++++--- pg_pathman--1.4--1.5.sql | 27 ++++++++++++++ sql/pathman_calamity.sql | 16 ++++----- sql/pathman_column_type.sql | 10 +++--- src/hooks.c | 3 -- src/include/pathman.h | 3 +- src/include/relation_info.h | 16 +-------- src/init.c | 60 -------------------------------- src/partition_creation.c | 12 +++---- src/pl_funcs.c | 26 +++++++++++--- src/pl_range_funcs.c | 31 ++--------------- src/relation_info.c | 35 ++++++------------- 16 files changed, 132 insertions(+), 197 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index e9950470..3a9e0a65 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1438,16 +1438,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr -----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 21 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ----------+------+----------+----------------+------------- + partrel | expr | parttype | range_interval +---------+------+----------+---------------- (0 rows) /* Check overlaps */ @@ -1544,9 +1544,9 @@ SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ---------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | 
parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 (1 row) CREATE TABLE test."RangeRel" ( diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 2889cc80..e28777bf 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -281,21 +281,21 @@ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ (1 row) /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ ERROR: relation "1" does not exist -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ ERROR: 'partrel' should not be NULL -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ ERROR: 'expression' should not be NULL -SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ ERROR: 'parttype' should not be NULL -SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ ERROR: interval should be NULL for HASH partitioned table -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ ERROR: failed to analyze partitioning expression "expr" -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -ERROR: unrecognized token: "cooked_expr" -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ ERROR: failed to analyze partitioning expression "EXPR" /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index eacdb97a..d3022d77 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -29,12 +29,30 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; (4 rows) /* change column's type (should flush caches) */ +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that parsed expression was cleared */ -SELECT partrel, cooked_expr FROM pathman_config; - partrel | cooked_expr ------------------------+------------- - test_column_type.test | +/* check that expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type 
+------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) /* make sure that everything works properly */ @@ -43,13 +61,6 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -/* check that expression has been built */ -SELECT partrel, cooked_expr FROM pathman_config; - partrel | cooked_expr ------------------------+------------------------------------------------------------------------------------------------------------------------- - test_column_type.test | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} -(1 row) - SELECT context, entries FROM pathman_cache_stats ORDER BY context; context | entries -------------------------+--------- diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 388fc2bc..d03588c7 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -44,9 +44,9 @@ SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); /* Should be able to see */ SET ROLE user2; SELECT * FROM pathman_config; - partrel | expr | parttype | range_interval | cooked_expr --------------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval +-------------------------+------+----------+---------------- + permissions.user1_table | id | 2 | 10 (1 row) SELECT * FROM pathman_config_params; diff --git a/init.sql b/init.sql index 12546cca..fdb774db 100644 --- a/init.sql +++ b/init.sql @@ -18,8 +18,7 @@ CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( partrel REGCLASS, expr TEXT, parttype INTEGER, - range_interval TEXT, - cooked_expr TEXT) + range_interval TEXT) RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' LANGUAGE C; @@ -37,7 +36,6 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( expr TEXT NOT NULL, parttype INTEGER NOT NULL, range_interval TEXT DEFAULT NULL, - cooked_expr TEXT DEFAULT NULL, /* check for allowed part types */ CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), @@ -47,8 +45,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( CHECK (@extschema@.validate_interval_value(partrel, expr, parttype, - range_interval, - cooked_expr)) + range_interval)) ); @@ -674,6 +671,14 @@ CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; +/* + * Get parsed and analyzed expression. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' +LANGUAGE C STRICT; + /* * Get partitioning type. 
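  * (1 stands for HASH, 2 for RANGE, as enforced by the
  *  pathman_config_parttype_check constraint.)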
*/ diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index fe29a586..a8e7fb21 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -1,3 +1,30 @@ +ALTER TABLE @extschema@.pathman_config DROP CONSTRAINT pathman_config_interval_check; + +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, + TEXT, TEXT); +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +ALTER TABLE @extschema@.pathman_config DROP COLUMN cooked_expr; +ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(partrel, + expr, + parttype, + range_interval)); + +/* + * Get parsed and analyzed expression. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' +LANGUAGE C STRICT; + /* * Add new partition */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 1c48138e..51827887 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -132,14 +132,14 @@ SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 47d38cc5..ab2b43f1 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -18,17 +18,17 @@ SELECT * FROM test_column_type.test; SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* change column's type (should flush caches) */ +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that parsed expression was cleared */ -SELECT partrel, cooked_expr FROM pathman_config; +/* check that expression has been built */ 
+SELECT get_partition_key_type('test_column_type.test'::REGCLASS); +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -/* check that expression has been built */ -SELECT partrel, cooked_expr FROM pathman_config; - SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* check insert dispatching */ diff --git a/src/hooks.c b/src/hooks.c index b8c7a194..5cd3e14c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -949,9 +949,6 @@ pathman_process_utility_hook(Node *first_arg, " of table \"%s\" partitioned by HASH", get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); - - /* Don't forget to invalidate parsed partitioning expression */ - pathman_config_invalidate_parsed_expression(relation_oid); } } diff --git a/src/include/pathman.h b/src/include/pathman.h index b5f9a156..b9acfe59 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -45,12 +45,11 @@ * Definitions for the "pathman_config" table. */ #define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 5 +#define Natts_pathman_config 4 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ #define Anum_pathman_config_expr 2 /* partition expression (original) */ #define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ #define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. (text) */ -#define Anum_pathman_config_cooked_expr 5 /* parsed partitioning expression (text) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index f3faa3d3..6b9ffa92 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -384,27 +384,13 @@ Node *parse_partitioning_expression(const Oid relid, char **query_string_out, Node **parsetree_out); -Datum cook_partitioning_expression(const Oid relid, +Node *cook_partitioning_expression(const Oid relid, const char *expr_cstr, Oid *expr_type); char *canonicalize_partitioning_expression(const Oid relid, const char *expr_cstr); -/* Partitioning expression routines */ -Node *parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, - Node **parsetree_out); - -Datum cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type); - -char *canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr); - - /* Global invalidation routines */ void delay_pathman_shutdown(void); void finish_delayed_invalidation(void); diff --git a/src/init.c b/src/init.c index 9e15628e..f6ddbdae 100644 --- a/src/init.c +++ b/src/init.c @@ -675,66 +675,6 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, return contains_rel; } -/* Invalidate parsed partitioning expression in PATHMAN_CONFIG */ -void -pathman_config_invalidate_parsed_expression(Oid relid) -{ - ItemPointerData iptr; /* pointer to tuple */ - Datum values[Natts_pathman_config]; - bool nulls[Natts_pathman_config]; - - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, nulls, NULL, &iptr)) - { - Relation rel; - HeapTuple new_htup; - - /* Reset parsed expression */ - values[Anum_pathman_config_cooked_expr - 1] = (Datum) 0; - nulls[Anum_pathman_config_cooked_expr - 1] = true; - - rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); - - /* Form new tuple and perform an update */ - 
new_htup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
-		CatalogTupleUpdate(rel, &iptr, new_htup);
-		heap_freetuple(new_htup);
-
-		heap_close(rel, RowExclusiveLock);
-	}
-}
-
-/* Refresh parsed partitioning expression in PATHMAN_CONFIG */
-void
-pathman_config_refresh_parsed_expression(Oid relid,
-										 Datum *values,
-										 bool *isnull,
-										 ItemPointer iptr)
-{
-	char	   *expr_cstr;
-	Datum		expr_datum;
-
-	Relation	rel;
-	HeapTuple	htup_new;
-
-	/* get and parse expression */
-	expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]);
-	expr_datum = cook_partitioning_expression(relid, expr_cstr, NULL);
-	pfree(expr_cstr);
-
-	/* prepare tuple values */
-	values[Anum_pathman_config_cooked_expr - 1] = expr_datum;
-	isnull[Anum_pathman_config_cooked_expr - 1] = false;
-
-	rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock);
-
-	htup_new = heap_form_tuple(RelationGetDescr(rel), values, isnull);
-	CatalogTupleUpdate(rel, iptr, htup_new);
-
-	heap_close(rel, RowExclusiveLock);
-}
-
-
 /*
  * Loads additional pathman parameters like 'enable_parent'
  * or 'auto' from PATHMAN_CONFIG_PARAMS.
diff --git a/src/partition_creation.c b/src/partition_creation.c
index 1ddc39e1..fc950c4f 100644
--- a/src/partition_creation.c
+++ b/src/partition_creation.c
@@ -1854,20 +1854,15 @@ build_partitioning_expression(Oid parent_relid,
 	expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]);
 	expr = parse_partitioning_expression(parent_relid, expr_cstr, NULL, NULL);
-	pfree(expr_cstr);
 
 	/* We need expression type for hash functions */
 	if (expr_type)
 	{
-		char	   *expr_p_cstr;
-
-		/* We can safely assume that this field will always remain not null */
-		Assert(!isnull[Anum_pathman_config_cooked_expr - 1]);
-		expr_p_cstr =
-				TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]);
+		Node	   *expr;
+		expr = cook_partitioning_expression(parent_relid, expr_cstr, NULL);
 
 		/* Finally return expression type */
-		*expr_type = exprType(stringToNode(expr_p_cstr));
+		*expr_type = exprType(expr);
 	}
 
 	if (columns)
@@ -1877,5 +1872,6 @@
 		extract_column_names(expr, columns);
 	}
 
+	pfree(expr_cstr);
 	return expr;
 }
diff --git a/src/pl_funcs.c b/src/pl_funcs.c
index b90619e0..44a5f93f 100644
--- a/src/pl_funcs.c
+++ b/src/pl_funcs.c
@@ -47,6 +47,7 @@
 PG_FUNCTION_INFO_V1( get_number_of_partitions_pl );
 
 PG_FUNCTION_INFO_V1( get_partition_key_type_pl );
+PG_FUNCTION_INFO_V1( get_partition_cooked_key_pl );
 PG_FUNCTION_INFO_V1( get_parent_of_partition_pl );
 PG_FUNCTION_INFO_V1( get_base_type_pl );
 PG_FUNCTION_INFO_V1( get_tablespace_pl );
@@ -140,6 +141,25 @@ get_partition_key_type_pl(PG_FUNCTION_ARGS)
 	PG_RETURN_OID(typid);
 }
 
+/*
+ * Return parsed and analyzed (cooked) partitioning expression.
+ */
+Datum
+get_partition_cooked_key_pl(PG_FUNCTION_ARGS)
+{
+	Oid			relid = PG_GETARG_OID(0);
+	PartRelationInfo *prel;
+	Datum		res;
+
+	prel = get_pathman_relation_info(relid);
+	shout_if_prel_is_invalid(relid, prel, PT_ANY);
+
+	res = CStringGetTextDatum(nodeToString(prel->expr));
+	close_pathman_relation_info(prel);
+
+	PG_RETURN_TEXT_P(res);
+}
+
 /*
  * Extract basic type of a domain. 
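  * For instance, a domain declared over INT4 yields INT4 here.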
*/ @@ -685,7 +705,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) HeapTuple htup; Oid expr_type; - Datum expr_datum; PathmanInitState init_state; @@ -750,7 +769,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } /* Parse and check expression */ - expr_datum = cook_partitioning_expression(relid, expression, &expr_type); + cook_partitioning_expression(relid, expression, &expr_type); /* Canonicalize user's expression (trim whitespaces etc) */ expression = canonicalize_partitioning_expression(relid, expression); @@ -778,9 +797,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_expr - 1] = CStringGetTextDatum(expression); isnull[Anum_pathman_config_expr - 1] = false; - values[Anum_pathman_config_cooked_expr - 1] = expr_datum; - isnull[Anum_pathman_config_cooked_expr - 1] = false; - /* Insert new row into PATHMAN_CONFIG */ pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index f8f52e9d..351926f7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -400,7 +400,6 @@ validate_interval_value(PG_FUNCTION_ARGS) #define ARG_EXPRESSION 1 #define ARG_PARTTYPE 2 #define ARG_RANGE_INTERVAL 3 -#define ARG_EXPRESSION_P 4 Oid partrel; PartType parttype; @@ -433,35 +432,9 @@ validate_interval_value(PG_FUNCTION_ARGS) else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); /* - * Fetch partitioning expression's type using - * either user's expression or parsed expression. - * - * NOTE: we check number of function's arguments - * in case of late updates (e.g. 1.1 => 1.4). + * Try to parse partitioning expression, could fail with ERROR. */ - if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P) - { - Datum expr_datum; - - /* We'll have to parse expression with our own hands */ - expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); - - /* Free both expressions */ - pfree(DatumGetPointer(expr_datum)); - pfree(expr_cstr); - } - else - { - char *expr_p_cstr; - - /* Good, let's use a cached parsed expression */ - expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); - expr_type = exprType(stringToNode(expr_p_cstr)); - - /* Free both expressions */ - pfree(expr_p_cstr); - pfree(expr_cstr); - } + cook_partitioning_expression(partrel, expr_cstr, &expr_type); /* * NULL interval is fine for both HASH and RANGE. diff --git a/src/relation_info.c b/src/relation_info.c index 386008d2..8ee74217 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -340,19 +340,12 @@ get_pathman_relation_info(Oid relid) bool isnull[Natts_pathman_config]; bool found; - /* Check if PATHMAN_CONFIG table contains this relation */ + /* + * Check if PATHMAN_CONFIG table contains this relation and + * build a partitioned table cache entry (might emit ERROR). 
+ */ if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) - { - bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - - /* Update pending partitioning expression */ - if (upd_expr) - pathman_config_refresh_parsed_expression(relid, values, - isnull, &iptr); - - /* Build a partitioned table cache entry (might emit ERROR) */ prel = build_pathman_relation_info(relid, values); - } /* Create a new entry for this relation */ psin = pathman_cache_search_relid(status_cache, @@ -414,7 +407,6 @@ build_pathman_relation_info(Oid relid, Datum *values) { MemoryContext old_mcxt; const TypeCacheEntry *typcache; - char *expr; Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; Oid *prel_children; @@ -428,15 +420,12 @@ build_pathman_relation_info(Oid relid, Datum *values) /* Set partitioning type */ prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - /* Fetch cooked partitioning expression */ - expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); - /* Switch to persistent memory context */ old_mcxt = MemoryContextSwitchTo(prel->mcxt); /* Build partitioning expression tree */ prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); - prel->expr = (Node *) stringToNode(expr); + prel->expr = cook_partitioning_expression(relid, prel->expr_cstr, NULL); fix_opfuncids(prel->expr); /* Extract Vars and varattnos of partitioning expression */ @@ -1361,18 +1350,16 @@ parse_partitioning_expression(const Oid relid, } /* Parse partitioning expression and return its type and nodeToString() as TEXT */ -Datum +Node * cook_partitioning_expression(const Oid relid, const char *expr_cstr, Oid *expr_type_out) /* ret value #1 */ { + Node *expr; Node *parse_tree; List *query_tree_list; - char *query_string, - *expr_serialized = ""; /* keep compiler happy */ - - Datum expr_datum; + char *query_string; MemoryContext parse_mcxt, old_mcxt; @@ -1400,7 +1387,6 @@ cook_partitioning_expression(const Oid relid, PG_TRY(); { Query *query; - Node *expr; int expr_attr; Relids expr_varnos; Bitmapset *expr_varattnos = NULL; @@ -1478,7 +1464,6 @@ cook_partitioning_expression(const Oid relid, bms_free(expr_varattnos); Assert(expr); - expr_serialized = nodeToString(expr); /* Set 'expr_type_out' if needed */ if (expr_type_out) @@ -1514,12 +1499,12 @@ cook_partitioning_expression(const Oid relid, MemoryContextSwitchTo(old_mcxt); /* Get Datum of serialized expression (right mcxt) */ - expr_datum = CStringGetTextDatum(expr_serialized); + expr = copyObject(expr); /* Free memory */ MemoryContextDelete(parse_mcxt); - return expr_datum; + return expr; } /* Canonicalize user's expression (trim whitespaces etc) */ From d739585de7366ca70acf0c4d50a6a1f1cd228fd1 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 16:44:43 +0300 Subject: [PATCH 355/528] Fix tests for postgres with version >= 10 --- expected/pathman_basic_1.out | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out index 692de996..61aed5db 100644 --- a/expected/pathman_basic_1.out +++ b/expected/pathman_basic_1.out @@ -1438,16 +1438,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr 
-----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 21 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ----------+------+----------+----------------+------------- + partrel | expr | parttype | range_interval +---------+------+----------+---------------- (0 rows) /* Check overlaps */ @@ -1544,9 +1544,9 @@ SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ---------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 (1 row) CREATE TABLE test."RangeRel" ( From adf5fd776dd7f6ed359a54d0b16c3e39472c4334 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 18:59:01 +0300 Subject: [PATCH 356/528] Start working on update checking script --- tests/update/check_update.py | 253 +++++++++++++++++++++++++++++++++++ 1 file changed, 253 insertions(+) create mode 100755 tests/update/check_update.py diff --git a/tests/update/check_update.py b/tests/update/check_update.py new file mode 100755 index 00000000..f1ac3cef --- /dev/null +++ b/tests/update/check_update.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python +#coding: utf-8 + +import os +import contextlib +import sys +import argparse +import testgres +import subprocess +import difflib + +repo_dir = os.path.abspath(os.path.join('../..', os.path.dirname(__file__))) + +compilation = ''' +make USE_PGXS=1 clean +make USE_PGXS=1 install +''' + +# just bunch of tables to create +run_sql = ''' +CREATE EXTENSION pg_pathman; + +CREATE TABLE hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel VALUES (1, 1); +INSERT INTO hash_rel VALUES (2, 2); +INSERT INTO hash_rel VALUES (3, 3); + +SELECT create_hash_partitions('hash_rel', 'Value', 3); + +CREATE TABLE range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON range_rel (dt); +INSERT INTO range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT create_range_partitions('range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT create_range_partitions('num_range_rel', 'id', 0, 1000, 4); +INSERT INTO num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +CREATE TABLE improved_dummy_test1 (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO improved_dummy_test1 (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT 
create_range_partitions('improved_dummy_test1', 'id', 1, 10); +INSERT INTO improved_dummy_test1 (name) VALUES ('test'); /* spawns new partition */ +ALTER TABLE improved_dummy_1 ADD CHECK (name != 'ib'); /* make improved_dummy_1 disappear */ + +CREATE TABLE test_improved_dummy_test2 (val INT NOT NULL); +SELECT create_range_partitions('test_improved_dummy_test2', 'val', + generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + +CREATE TABLE insert_into_select(val int NOT NULL); +INSERT INTO insert_into_select SELECT generate_series(1, 100); +SELECT create_range_partitions('insert_into_select', 'val', 1, 20); +CREATE TABLE insert_into_select_copy (LIKE insert_into_select); /* INSERT INTO ... SELECT ... */ + +# just a lot of actions + +SELECT split_range_partition('num_range_rel_1', 500); +SELECT split_range_partition('range_rel_1', '2015-01-15'::DATE); + +/* Merge two partitions into one */ +SELECT merge_range_partitions('num_range_rel_1', 'num_range_rel_' || currval('num_range_rel_seq')); +SELECT merge_range_partitions('range_rel_1', 'range_rel_' || currval('range_rel_seq')); + +/* Append and prepend partitions */ +SELECT append_range_partition('num_range_rel'); +SELECT prepend_range_partition('num_range_rel'); +SELECT drop_range_partition('num_range_rel_7'); + +SELECT drop_range_partition_expand_next('num_range_rel_4'); +SELECT drop_range_partition_expand_next('num_range_rel_6'); + +SELECT append_range_partition('range_rel'); +SELECT prepend_range_partition('range_rel'); +SELECT drop_range_partition('range_rel_7'); +SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + +CREATE TABLE range_rel_archive (LIKE range_rel INCLUDING ALL); +SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); +SELECT detach_range_partition('range_rel_archive'); + +CREATE TABLE range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT attach_range_partition('range_rel', 'range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +CREATE TABLE range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT attach_range_partition('range_rel', 'range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); + +/* Half open ranges */ +SELECT add_range_partition('range_rel', NULL, '2014-12-01'::DATE, 'range_rel_minus_infinity'); +SELECT add_range_partition('range_rel', '2015-06-01'::DATE, NULL, 'range_rel_plus_infinity'); +SELECT append_range_partition('range_rel'); +SELECT prepend_range_partition('range_rel'); + +CREATE TABLE range_rel_minus_infinity (LIKE range_rel INCLUDING ALL); +SELECT attach_range_partition('range_rel', 'range_rel_minus_infinity', NULL, '2014-12-01'::DATE); +INSERT INTO range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO range_rel (dt) VALUES ('2015-12-15'); + +CREATE TABLE zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT create_range_partitions('zero', 'value', 50, 10, 0); +SELECT append_range_partition('zero', 'zero_0'); +SELECT prepend_range_partition('zero', 'zero_1'); +SELECT add_range_partition('zero', 50, 70, 'zero_50'); +SELECT append_range_partition('zero', 'zero_appended'); +SELECT prepend_range_partition('zero', 'zero_prepended'); +SELECT split_range_partition('zero_50', 60, 'zero_60'); + +CREATE 
TABLE hash_rel_extern (LIKE hash_rel INCLUDING ALL); +SELECT replace_hash_partition('hash_rel_0', 'hash_rel_extern'); + +-- automatic partitions creation +CREATE TABLE range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT create_range_partitions('range_rel_test1', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); + +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); + +/* CaMeL cAsE table names and attributes */ +CREATE TABLE "TeSt" (a INT NOT NULL, b INT); +SELECT create_hash_partitions('TeSt', 'a', 3); +SELECT create_hash_partitions('"TeSt"', 'a', 3); +INSERT INTO "TeSt" VALUES (1, 1); +INSERT INTO "TeSt" VALUES (2, 2); +INSERT INTO "TeSt" VALUES (3, 3); + +CREATE TABLE "RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO "RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT create_range_partitions('"RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); +SELECT append_range_partition('"RangeRel"'); +SELECT prepend_range_partition('"RangeRel"'); +SELECT merge_range_partitions('"RangeRel_1"', '"RangeRel_' || currval('"RangeRel_seq"') || '"'); +SELECT split_range_partition('"RangeRel_1"', '2015-01-01'::DATE); + +CREATE TABLE hash_rel_next1 ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel_next1 (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('hash_rel_next1', 'value', 3); + +CREATE TABLE range_rel_next1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO range_rel_next1 (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('range_rel_next1', 'dt', '2010-01-01'::date, '1 month'::interval, 12); +SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); +SELECT split_range_partition('range_rel_1', '2010-02-15'::date); +SELECT append_range_partition('range_rel_next1'); +SELECT prepend_range_partition('range_rel_next1'); +''' + +@contextlib.contextmanager +def cwd(path): + print("cwd: ", path) + curdir = os.getcwd() + os.chdir(path) + + try: + yield + finally: + print("cwd:", curdir) + os.chdir(curdir) + +dump1_file = '/tmp/dump1.sql' +dump2_file = '/tmp/dump2.sql' + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='pg_pathman update checker') + parser.add_argument('branches', nargs=2, + help='specify branches ("main rel_1.5")') + + args = parser.parse_args() + + with open('dump_pathman_objects.sql') as f: + dump_sql = f.read() + + with cwd(repo_dir): + subprocess.check_output("git checkout %s" % args.branches[0], shell=True) + subprocess.check_output(compilation, shell=True) + + with testgres.get_new_node('updated') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + + node.start() + node.safe_psql('postgres', run_sql) + node.dump(dump1_file, 'postgres') + node.stop() + + subprocess.check_output("git checkout %s" % args.branches[1], shell=True) + subprocess.check_output(compilation, shell=True) + + version = None + with open('pg_pathman.control') as f: + for line in f.readlines(): + if line.startswith('default_version'): + version = line.split('=').strip() + + if version is None: + print("cound not find version in second branch") + exit(1) 
+ + node.start() + node.safe_psql("postgres", "alter extension pg_pathman update to %s" % version) + dumped_objects_old = node.safe_psql("postgres", dump_sql) + node.stop() + + # now make clean install + with testgres.get_new_node('from_scratch') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql('postgres', run_sql) + dumped_objects_new = node.safe_psql("postgres", dump_sql) + node.dump(dump2_file, 'postgres') + + # check dumps + node.safe_psql('postgres', 'create database d1') + node.restore(dump1_file, 'd1') + + node.safe_psql('postgres', 'create database d2') + node.restore(dump2_file, 'd2') + node.stop() + + if dumped_objects != dumped_objects_new: + pass From 08eb0f439bb307afafedeab3cdfa14faf5f08b1d Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 19:44:10 +0300 Subject: [PATCH 357/528] Fix update checking script --- tests/update/check_update.py | 99 +++++++++++++----------------------- 1 file changed, 36 insertions(+), 63 deletions(-) diff --git a/tests/update/check_update.py b/tests/update/check_update.py index f1ac3cef..be5f2aa2 100755 --- a/tests/update/check_update.py +++ b/tests/update/check_update.py @@ -1,6 +1,7 @@ #!/usr/bin/env python #coding: utf-8 +import shutil import os import contextlib import sys @@ -9,7 +10,9 @@ import subprocess import difflib -repo_dir = os.path.abspath(os.path.join('../..', os.path.dirname(__file__))) +my_dir = os.path.dirname(os.path.abspath(__file__)) +repo_dir = os.path.abspath(os.path.join(my_dir, '../../')) +print(repo_dir) compilation = ''' make USE_PGXS=1 clean @@ -31,7 +34,7 @@ CREATE TABLE range_rel ( id SERIAL PRIMARY KEY, - dt TIMESTAMP, + dt TIMESTAMP not null, txt TEXT); CREATE INDEX ON range_rel (dt); INSERT INTO range_rel (dt, txt) @@ -49,7 +52,7 @@ INSERT INTO improved_dummy_test1 (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; SELECT create_range_partitions('improved_dummy_test1', 'id', 1, 10); INSERT INTO improved_dummy_test1 (name) VALUES ('test'); /* spawns new partition */ -ALTER TABLE improved_dummy_1 ADD CHECK (name != 'ib'); /* make improved_dummy_1 disappear */ +ALTER TABLE improved_dummy_test1 ADD CHECK (name != 'ib'); CREATE TABLE test_improved_dummy_test2 (val INT NOT NULL); SELECT create_range_partitions('test_improved_dummy_test2', 'val', @@ -61,7 +64,7 @@ SELECT create_range_partitions('insert_into_select', 'val', 1, 20); CREATE TABLE insert_into_select_copy (LIKE insert_into_select); /* INSERT INTO ... SELECT ... 
*/ -# just a lot of actions +-- just a lot of actions SELECT split_range_partition('num_range_rel_1', 500); SELECT split_range_partition('range_rel_1', '2015-01-15'::DATE); @@ -81,48 +84,13 @@ SELECT append_range_partition('range_rel'); SELECT prepend_range_partition('range_rel'); SELECT drop_range_partition('range_rel_7'); -SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); -CREATE TABLE range_rel_archive (LIKE range_rel INCLUDING ALL); -SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); -SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); -SELECT detach_range_partition('range_rel_archive'); - -CREATE TABLE range_rel_test1 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP, - txt TEXT, - abc INTEGER); -SELECT attach_range_partition('range_rel', 'range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); -CREATE TABLE range_rel_test2 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP); -SELECT attach_range_partition('range_rel', 'range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); - -/* Half open ranges */ -SELECT add_range_partition('range_rel', NULL, '2014-12-01'::DATE, 'range_rel_minus_infinity'); -SELECT add_range_partition('range_rel', '2015-06-01'::DATE, NULL, 'range_rel_plus_infinity'); -SELECT append_range_partition('range_rel'); -SELECT prepend_range_partition('range_rel'); - CREATE TABLE range_rel_minus_infinity (LIKE range_rel INCLUDING ALL); SELECT attach_range_partition('range_rel', 'range_rel_minus_infinity', NULL, '2014-12-01'::DATE); INSERT INTO range_rel (dt) VALUES ('2012-06-15'); INSERT INTO range_rel (dt) VALUES ('2015-12-15'); -CREATE TABLE zero( - id SERIAL PRIMARY KEY, - value INT NOT NULL); -INSERT INTO zero SELECT g, g FROM generate_series(1, 100) as g; -SELECT create_range_partitions('zero', 'value', 50, 10, 0); -SELECT append_range_partition('zero', 'zero_0'); -SELECT prepend_range_partition('zero', 'zero_1'); -SELECT add_range_partition('zero', 50, 70, 'zero_50'); -SELECT append_range_partition('zero', 'zero_appended'); -SELECT prepend_range_partition('zero', 'zero_prepended'); -SELECT split_range_partition('zero_50', 60, 'zero_60'); - CREATE TABLE hash_rel_extern (LIKE hash_rel INCLUDING ALL); SELECT replace_hash_partition('hash_rel_0', 'hash_rel_extern'); @@ -140,7 +108,6 @@ /* CaMeL cAsE table names and attributes */ CREATE TABLE "TeSt" (a INT NOT NULL, b INT); -SELECT create_hash_partitions('TeSt', 'a', 3); SELECT create_hash_partitions('"TeSt"', 'a', 3); INSERT INTO "TeSt" VALUES (1, 1); INSERT INTO "TeSt" VALUES (2, 2); @@ -163,17 +130,6 @@ value INTEGER NOT NULL); INSERT INTO hash_rel_next1 (value) SELECT g FROM generate_series(1, 10000) as g; SELECT create_hash_partitions('hash_rel_next1', 'value', 3); - -CREATE TABLE range_rel_next1 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL, - value INTEGER); -INSERT INTO range_rel_next1 (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT create_range_partitions('range_rel_next1', 'dt', '2010-01-01'::date, '1 month'::interval, 12); -SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); -SELECT split_range_partition('range_rel_1', '2010-02-15'::date); -SELECT append_range_partition('range_rel_next1'); -SELECT prepend_range_partition('range_rel_next1'); ''' @contextlib.contextmanager @@ -188,6 +144,10 @@ def cwd(path): 
print("cwd:", curdir) os.chdir(curdir) +def shell(cmd): + print(cmd) + subprocess.check_output(cmd, shell=True) + dump1_file = '/tmp/dump1.sql' dump2_file = '/tmp/dump2.sql' @@ -198,12 +158,17 @@ def cwd(path): args = parser.parse_args() - with open('dump_pathman_objects.sql') as f: + with open(os.path.join(my_dir, 'dump_pathman_objects.sql'), 'r') as f: dump_sql = f.read() - with cwd(repo_dir): - subprocess.check_output("git checkout %s" % args.branches[0], shell=True) - subprocess.check_output(compilation, shell=True) + shutil.rmtree('/tmp/pg_pathman') + shutil.copytree(repo_dir, '/tmp/pg_pathman') + + with cwd('/tmp/pg_pathman'): + shell("git clean -fdx") + shell("git reset --hard") + shell("git checkout %s" % args.branches[0]) + shell(compilation) with testgres.get_new_node('updated') as node: node.init() @@ -214,22 +179,24 @@ def cwd(path): node.dump(dump1_file, 'postgres') node.stop() - subprocess.check_output("git checkout %s" % args.branches[1], shell=True) - subprocess.check_output(compilation, shell=True) + shell("git clean -fdx") + shell("git checkout %s" % args.branches[1]) + shell(compilation) version = None with open('pg_pathman.control') as f: for line in f.readlines(): if line.startswith('default_version'): - version = line.split('=').strip() + version = line.split('=')[1].strip() if version is None: print("cound not find version in second branch") exit(1) node.start() - node.safe_psql("postgres", "alter extension pg_pathman update to %s" % version) - dumped_objects_old = node.safe_psql("postgres", dump_sql) + p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dumped_objects_old = p.communicate(input=dump_sql.encode())[0].decode() node.stop() # now make clean install @@ -238,7 +205,9 @@ def cwd(path): node.append_conf("shared_preload_libraries='pg_pathman'\n") node.start() node.safe_psql('postgres', run_sql) - dumped_objects_new = node.safe_psql("postgres", dump_sql) + p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dumped_objects_new = p.communicate(input=dump_sql.encode())[0].decode() node.dump(dump2_file, 'postgres') # check dumps @@ -249,5 +218,9 @@ def cwd(path): node.restore(dump2_file, 'd2') node.stop() - if dumped_objects != dumped_objects_new: - pass + if dumped_objects_old != dumped_objects_new: + print("\nDIFF:") + for line in difflib.context_diff(dumped_objects_old.split('\n'), dumped_objects_new.split('\n')): + print(line) + else: + print("\nUPDATE CHECK: ALL GOOD") From 6a089e80f32320e0cb599ada66b8c5b9d5dfe937 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 24 Sep 2018 15:02:24 +0300 Subject: [PATCH 358/528] Add tests for concurrent updates --- Makefile | 2 +- tests/python/Makefile | 6 ++- tests/python/partitioning_test.py | 67 +++++++++++++++++++++++++++++-- 3 files changed, 70 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index efd0cbc5..7292cd43 100644 --- a/Makefile +++ b/Makefile @@ -93,7 +93,7 @@ isolationcheck: | submake-isolation $(ISOLATIONCHECKS) python_tests: - $(MAKE) -C tests/python partitioning_tests + $(MAKE) -C tests/python partitioning_tests CASE=$(CASE) cmocka_tests: $(MAKE) -C tests/cmocka check diff --git a/tests/python/Makefile b/tests/python/Makefile index ee650ea4..f8a71e41 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,2 +1,6 @@ partitioning_tests: - python -m unittest --verbose --failfast partitioning_test.py +ifneq ($(CASE),) + python partitioning_test.py Tests.$(CASE) +else + python 
partitioning_test.py +endif diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index cb1282c6..0e3d1492 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -7,15 +7,18 @@ Copyright (c) 2015-2017, Postgres Professional """ +import functools import json import math +import multiprocessing import os +import random import re import subprocess +import sys import threading import time import unittest -import functools from distutils.version import LooseVersion from testgres import get_new_node, get_pg_version @@ -85,10 +88,17 @@ def set_trace(self, con, command="pg_debug"): p = subprocess.Popen([command], stdin=subprocess.PIPE) p.communicate(str(pid).encode()) - def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): + def start_new_pathman_cluster(self, + allow_streaming=False, + test_data=False, + enable_partitionrouter=False): + node = get_new_node() node.init(allow_streaming=allow_streaming) node.append_conf("shared_preload_libraries='pg_pathman'\n") + if enable_partitionrouter: + node.append_conf("pg_pathman.enable_partitionrouter=on\n") + node.start() node.psql('create extension pg_pathman') @@ -1065,6 +1075,57 @@ def test_update_node_plan1(self): node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + def test_concurrent_updates(self): + ''' + Test whether conncurrent updates work correctly between + partitions. + ''' + + create_sql = ''' + CREATE TABLE test1(id INT, b INT NOT NULL); + INSERT INTO test1 + SELECT i, i FROM generate_series(1, 100) i; + SELECT create_range_partitions('test1', 'b', 1, 5); + ''' + + with self.start_new_pathman_cluster(enable_partitionrouter=True) as node: + node.safe_psql(create_sql) + + pool = multiprocessing.Pool(processes=4) + for count in range(1, 200): + pool.apply_async(make_updates, (node, count, )) + + pool.close() + pool.join() + + # check all data is there and not duplicated + with node.connect() as con: + for i in range(1, 100): + row = con.execute("select count(*) from test1 where id = %d" % i)[0] + self.assertEqual(row[0], 1) + + self.assertEqual(node.execute("select count(*) from test1")[0][0], 100) + + +def make_updates(node, count): + update_sql = ''' + BEGIN; + UPDATE test1 SET b = trunc(random() * 100 + 1) WHERE id in (%s); + COMMIT; + ''' + + with node.connect() as con: + for i in range(count): + rows_to_update = random.randint(20, 50) + ids = set([str(random.randint(1, 100)) for i in range(rows_to_update)]) + con.execute(update_sql % ','.join(ids)) + if __name__ == "__main__": - unittest.main() + if len(sys.argv) > 1: + suite = unittest.TestLoader().loadTestsFromName(sys.argv[1], + module=sys.modules[__name__]) + else: + suite = unittest.TestLoader().loadTestsFromTestCase(Tests) + + unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) From 3d98a8e62c7d72b82408e07474a5091277021e9c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 24 Sep 2018 17:25:29 +0300 Subject: [PATCH 359/528] Update README --- README.md | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 2a2796d7..2bf95a2e 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: - * PostgreSQL 9.5, 9.6, 10; + * PostgreSQL 9.5, 9.6, 10, 11; * Postgres Pro Standard 9.5, 9.6; * Postgres Pro Enterprise; @@ -63,7 +63,7 @@ More 
interesting features are yet to come. Stay tuned! * Effective query planning for partitioned tables (JOINs, subselects etc); * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; * [`PartitionFilter`](#custom-plan-nodes): an efficient drop-in replacement for INSERT triggers; - * [`PartitionRouter`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); + * [`PartitionRouter`](#custom-plan-nodes) and [`PartitionOverseer`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); * Automatic partition creation for new INSERTed data (only for RANGE partitioning); * Improved `COPY FROM` statement that is able to insert rows directly into partitions; * [User-defined callbacks](#additional-parameters) for partition creation event handling; @@ -105,7 +105,7 @@ In order to update pg_pathman: 3. Execute the following queries: ```plpgsql -/* only required for major releases, e.g. 1.3 -> 1.4 */ +/* only required for major releases, e.g. 1.4 -> 1.5 */ ALTER EXTENSION pg_pathman UPDATE; SET pg_pathman.enable = t; ``` @@ -417,6 +417,7 @@ Shows memory consumption of various caches. - `RuntimeAppend` (overrides `Append` plan node) - `RuntimeMergeAppend` (overrides `MergeAppend` plan node) - `PartitionFilter` (drop-in replacement for INSERT triggers) +- `PartitionOverseer` (implements cross-partition UPDATEs) - `PartitionRouter` (implements cross-partition UPDATEs) `PartitionFilter` acts as a *proxy node* for INSERT's child scan, which means it can redirect output tuples to the corresponding partition: @@ -434,20 +435,27 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` -`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. when update of partitioning key requires that we move row to another partition). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. +`PartitionOverseer` and `PartitionRouter` are another *proxy nodes* used +in conjunction with `PartitionFilter` to enable cross-partition UPDATEs +(i.e. when update of partitioning key requires that we move row to another +partition). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; +cross-partition `UPDATE` is transformed into `DELETE + INSERT`), +it is disabled by default. +To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. 
```plpgsql EXPLAIN (COSTS OFF) UPDATE partitioned_table SET value = value + 1 WHERE value = 2; - QUERY PLAN ---------------------------------------------------- - Update on partitioned_table_0 - -> Custom Scan (PartitionRouter) + QUERY PLAN +--------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on partitioned_table_2 -> Custom Scan (PartitionFilter) - -> Seq Scan on partitioned_table_0 - Filter: (value = 2) -(5 rows) + -> Custom Scan (PartitionRouter) + -> Seq Scan on partitioned_table_2 + Filter: (value = 2) +(6 rows) ``` `RuntimeAppend` and `RuntimeMergeAppend` have much in common: they come in handy in a case when WHERE condition takes form of: From c69f0f2f63ffa3f4edce1edbafbd1f6677afb473 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 25 Sep 2018 14:27:35 +0300 Subject: [PATCH 360/528] Fix compilation error on older GCCs --- src/partition_overseer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 2456f6aa..41590425 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -68,12 +68,13 @@ partition_overseer_create_scan_state(CustomScan *node) static void set_mt_state_for_router(PlanState *state, void *context) { + int i; ModifyTableState *mt_state = (ModifyTableState *) state; if (!IsA(state, ModifyTableState)) return; - for (int i = 0; i < mt_state->mt_nplans; i++) + for (i = 0; i < mt_state->mt_nplans; i++) { CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; PartitionRouterState *pr_state; From a4eab851be6ad4af99fe597d1baddf5ff82e7877 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 25 Sep 2018 22:04:07 +0300 Subject: [PATCH 361/528] Bump version to 1.5.1 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 4c40be86..744310c2 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.0", + "version": "1.5.1", "maintainer": [ "Dmitry Ivanov ", "Ildus Kurbangaliev " @@ -23,7 +23,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.0", + "version": "1.5.1", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e28777bf..6b73351e 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.0 + 1.5.1 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 99426810..1c8c1584 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.0" +#define CURRENT_LIB_VERSION "1.5.1" void *pathman_cache_search_relid(HTAB *cache_table, From 50c078b45e7245d7758cde7ded98852a139bc396 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 26 Sep 2018 13:50:11 +0300 Subject: [PATCH 362/528] Add PGDLLIMPORT for FrontendProtocol --- src/utility_stmt_hooking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index c90a01da..3f1772a1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -35,7 +35,7 @@ /* we avoid includig libpq.h because it requires openssl.h */ #include "libpq/pqcomm.h" -extern ProtocolVersion FrontendProtocol; +extern PGDLLIMPORT ProtocolVersion FrontendProtocol; extern void pq_endmsgread(void); /* Determine whether we should enable COPY or not (PostgresPro has a fix) */ From 84682ff795c668862d6c40edb6ac3c6455855dc2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 13:02:13 +0300 Subject: [PATCH 363/528] Add support for ALTER TABLE .. ATTACH PARTITION --- Makefile | 2 +- src/declarative.c | 237 ++++++++++++++++++++++++ src/hooks.c | 31 ++++ src/include/declarative.h | 20 ++ src/include/planner_tree_modification.h | 3 +- src/planner_tree_modification.c | 33 ++++ src/utility_stmt_hooking.c | 1 - 7 files changed, 324 insertions(+), 3 deletions(-) create mode 100644 src/declarative.c create mode 100644 src/include/declarative.h diff --git a/Makefile b/Makefile index 82b0fc28..a754f7aa 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o $(WIN32RES) + src/partition_overseer.o src/declarative.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include diff --git a/src/declarative.c b/src/declarative.c new file mode 100644 index 00000000..48992f6d --- /dev/null +++ b/src/declarative.c @@ -0,0 +1,237 @@ +#include "declarative.h" +#include "utils.h" + +#include "fmgr.h" +#include "access/htup_details.h" +#include "catalog/namespace.h" +#include "catalog/pg_type.h" +#include "catalog/pg_proc.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_func.h" +#include "parser/parse_coerce.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/varbit.h" + +/* + * Modifies query of declarative partitioning commands, + * There is a little hack here, ATTACH PARTITION command + * expects relation with REL_PARTITIONED_TABLE relkind. + * To avoid this check we negate subtype, and then after the checks + * we set it back (look `is_pathman_related_partitioning_cmd`) + */ +void +modify_declative_partitioning_query(Query *query) +{ + if (query->commandType != CMD_UTILITY) + return; + + if (IsA(query->utilityStmt, AlterTableStmt)) + { + ListCell *lcmd; + Oid relid; + + AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; + relid = RangeVarGetRelid(stmt->relation, NoLock, true); + if (get_pathman_relation_info(relid) != NULL) + { + foreach(lcmd, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + switch (cmd->subtype) + { + case AT_AttachPartition: + case AT_DetachPartition: + cmd->subtype = -cmd->subtype; + break; + default: + break; + } + } + } + } +} + +/* is it one of declarative partitioning commands? 
*/ +bool is_pathman_related_partitioning_cmd(Node *parsetree) +{ + if (IsA(parsetree, AlterTableStmt)) + { + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + int cnt = 0; + + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + int subtype = cmd->subtype; + + if (subtype < 0) + subtype = -subtype; + + switch (subtype) + { + case AT_AttachPartition: + case AT_DetachPartition: + /* + * we need to fix all subtypes, + * possibly we're not going to handle this + */ + cmd->subtype = -(cmd->subtype); + continue; + default: + cnt++; + } + } + + return (cnt == 0); + } + return false; +} + +static FuncExpr * +make_fn_expr(Oid funcOid, List *args) +{ + FuncExpr *fn_expr; + HeapTuple procTup; + Form_pg_proc procStruct; + + procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid)); + if (!HeapTupleIsValid(procTup)) + elog(ERROR, "cache lookup failed for function %u", funcOid); + procStruct = (Form_pg_proc) GETSTRUCT(procTup); + + fn_expr = makeFuncExpr(funcOid, procStruct->prorettype, args, + InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL); + ReleaseSysCache(procTup); + return fn_expr; +} + +/* + * Transform one constant in a partition bound spec + */ +static Const * +transform_bound_value(ParseState *pstate, A_Const *con, + Oid colType, int32 colTypmod) +{ + Node *value; + + /* Make it into a Const */ + value = (Node *) make_const(pstate, &con->val, con->location); + + /* Coerce to correct type */ + value = coerce_to_target_type(pstate, + value, exprType(value), + colType, + colTypmod, + COERCION_ASSIGNMENT, + COERCE_IMPLICIT_CAST, + -1); + + if (value == NULL) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + parser_errposition(pstate, con->location))); + + /* Simplify the expression, in case we had a coercion */ + if (!IsA(value, Const)) + value = (Node *) expression_planner((Expr *) value); + + /* Fail if we don't have a constant (i.e., non-immutable coercion) */ + if (!IsA(value, Const)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + errdetail("The cast requires a non-immutable conversion."), + errhint("Try putting the literal value in single quotes."), + parser_errposition(pstate, con->location))); + + return (Const *) value; +} + +/* handle ALTER TABLE .. 
ATTACH PARTITION command */ +void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +{ + Oid parent_relid, + partition_relid, + proc_args[] = { REGCLASSOID, REGCLASSOID, + ANYELEMENTOID, ANYELEMENTOID }; + + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + List *fn_args; + ParseState *pstate = make_parsestate(NULL); + const PartRelationInfo *prel; + + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + + Assert(cmd->subtype == AT_AttachPartition); + + parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + elog(ERROR, "relation is not partitioned"); + + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(attach_range_partition))); + + ldatum = (PartitionRangeDatum *) linitial(pcmd->bound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(pcmd->bound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 4, proc_args, false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); + proc_fcinfo.argnull[0] = false; + proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[1] = false; + + /* Make function expression, we will need it to determine argument types */ + fn_args = list_make4(NULL, NULL, lval, rval); + proc_fcinfo.flinfo->fn_expr = + (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); + + if ((!list_length(pcmd->bound->lowerdatums)) || + (!list_length(pcmd->bound->upperdatums))) + elog(ERROR, "provide start and end value for range partition"); + + proc_fcinfo.arg[2] = lval->constvalue; + proc_fcinfo.argnull[2] = ldatum->infinite || lval->constisnull; + proc_fcinfo.arg[3] = rval->constvalue; + proc_fcinfo.argnull[3] = rdatum->infinite || rval->constisnull; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); +} + +/* handle ALTER TABLE .. 
DETACH PARTITION command */ +void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +{ + Assert(cmd->subtype == AT_DetachPartition); +} diff --git a/src/hooks.c b/src/hooks.c index 5cd3e14c..4aa5bf40 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -13,6 +13,7 @@ #include "compat/pg_compat.h" #include "compat/rowmarks_fix.h" +#include "declarative.h" #include "hooks.h" #include "init.h" #include "partition_filter.h" @@ -766,6 +767,8 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) { load_config(); /* perform main cache initialization */ } + if (!IsPathmanReady()) + return; /* Process inlined SQL functions (we've already entered planning stage) */ if (IsPathmanReady() && get_planner_calls_count() > 0) @@ -812,7 +815,10 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) /* Modify query tree if needed */ pathman_transform_query(query, NULL); + return; } + + pathman_post_analyze_query(query); } /* @@ -950,6 +956,31 @@ pathman_process_utility_hook(Node *first_arg, get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); } + else if (is_pathman_related_partitioning_cmd(parsetree)) + { + /* we can handle all the partitioning commands */ + if (IsA(parsetree, AlterTableStmt)) + { + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + switch (cmd->subtype) + { + case AT_AttachPartition: + handle_attach_partition(stmt, cmd); + return; + case AT_DetachPartition: + handle_detach_partition(stmt, cmd); + return; + default: + elog(ERROR, "can't handle this command"); + } + } + } + } } /* Finally call process_utility_hook_next or standard_ProcessUtility */ diff --git a/src/include/declarative.h b/src/include/declarative.h new file mode 100644 index 00000000..56ce0ed7 --- /dev/null +++ b/src/include/declarative.h @@ -0,0 +1,20 @@ +#ifndef DECLARATIVE_H +#define DECLARATIVE_H + +#include "postgres.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" + +typedef enum DeclarativeCommandType { + DP_ATTACH, /* ALTER TABLE .. ATTACH PARTITION */ + DP_DETACH /* ALTER TABLE .. 
DETACH PARTITION */ +} DeclarativeCommandType; + +void modify_declative_partitioning_query(Query *query); +bool is_pathman_related_partitioning_cmd(Node *parsetree); + +/* actual actions */ +void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd); +void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd); + +#endif /* DECLARATIVE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 43f7a24b..4e33ca34 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -34,8 +34,9 @@ void state_tree_visitor(PlanState *state, void (*visitor) (PlanState *state, void *context), void *context); -/* Query tree rewriting utility */ +/* Query tree rewriting utilities */ void pathman_transform_query(Query *parse, ParamListInfo params); +void pathman_post_analyze_query(Query *parse); /* These functions scribble on Plan tree */ Plan *add_partition_filters(List *rtable, Plan *plan); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index a3b06873..d1412835 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -12,6 +12,7 @@ #include "compat/rowmarks_fix.h" +#include "declarative.h" #include "partition_filter.h" #include "partition_router.h" #include "partition_overseer.h" @@ -107,6 +108,7 @@ typedef struct } adjust_appendrel_varnos_cxt; static bool pathman_transform_query_walker(Node *node, void *context); +static bool pathman_post_analyze_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); static void handle_modification_query(Query *parse, transform_query_cxt *context); @@ -337,6 +339,12 @@ pathman_transform_query(Query *parse, ParamListInfo params) pathman_transform_query_walker((Node *) parse, (void *) &context); } +void +pathman_post_analyze_query(Query *parse) +{ + pathman_post_analyze_query_walker((Node *) parse, NULL); +} + /* Walker for pathman_transform_query() */ static bool pathman_transform_query_walker(Node *node, void *context) @@ -410,6 +418,31 @@ pathman_transform_query_walker(Node *node, void *context) context); } +static bool +pathman_post_analyze_query_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + else if (IsA(node, Query)) + { + Query *query = (Query *) node; + + /* Make changes for declarative syntax */ + modify_declative_partitioning_query(query); + + /* Handle Query node */ + return query_tree_walker(query, + pathman_post_analyze_query_walker, + context, + 0); + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_post_analyze_query_walker, + context); +} /* * ---------------------- diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 3f1772a1..9683914b 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -271,7 +271,6 @@ is_pathman_related_alter_column_type(Node *parsetree, return result; } - /* * CopyGetAttnums - build an integer list of attnums to be copied * From 722ddb012b8f66756f40040f53bb90f86ca27986 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 15:00:32 +0300 Subject: [PATCH 364/528] Add support of ALTER TABLE .. 
DETACH PARTITION --- src/declarative.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/declarative.c b/src/declarative.c index 48992f6d..0fdbf1a0 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -233,5 +233,32 @@ void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) /* handle ALTER TABLE .. DETACH PARTITION command */ void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) { + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + Oid partition_relid, + args = REGCLASSOID; + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + Assert(cmd->subtype == AT_DetachPartition); + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(detach_range_partition))); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 1, &args, false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[0] = false; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); } From fe70668a0863f9fadf3fbd94ad535222da526985 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 17:47:19 +0300 Subject: [PATCH 365/528] Add test files for declarative syntax --- Makefile | 6 ++++++ expected/pathman_declarative.out | 0 sql/pathman_declarative.sql | 30 ++++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 expected/pathman_declarative.out create mode 100644 sql/pathman_declarative.sql diff --git a/Makefile b/Makefile index a754f7aa..17d241e5 100644 --- a/Makefile +++ b/Makefile @@ -67,9 +67,15 @@ EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output +DECL_CHECK_VERSIONS = 10beta1 + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') +ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) + REGRESS += pathman_declarative +endif include $(PGXS) else subdir = contrib/pg_pathman diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out new file mode 100644 index 00000000..e69de29b diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql new file mode 100644 index 00000000..183be291 --- /dev/null +++ b/sql/pathman_declarative.sql @@ -0,0 +1,30 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); + +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + +SELECT * FROM pathman_partition_list; +CREATE TABLE test.r2 LIKE (test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman_partition_list; +\d+ test.r2; +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman_partition_list; +\d+ test.r2; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA 
pathman CASCADE; From ca9f9ebe86281b4443ce2e30eba48ad85a831728 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 18:37:32 +0300 Subject: [PATCH 366/528] Add tests for ATTACH and DETACH PARTITION commands --- expected/pathman_declarative.out | 72 ++++++++++++++++++++++++++++++++ sql/pathman_declarative.sql | 8 ++-- 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index e69de29b..9dc5cb93 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -0,0 +1,72 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +NOTICE: trigger "range_rel_upd_trig" for relation "test.r2" does not exist, skipping +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | 
integer | | not null | | plain | | + dt | date | | not null | | plain | | + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 7 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 183be291..4bb6b2b8 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -15,14 +15,14 @@ SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); -SELECT * FROM pathman_partition_list; -CREATE TABLE test.r2 LIKE (test.range_rel); +SELECT * FROM pathman.pathman_partition_list; +CREATE TABLE test.r2 (LIKE test.range_rel); ALTER TABLE test.range_rel ATTACH PARTITION test.r2 FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); -SELECT * FROM pathman_partition_list; +SELECT * FROM pathman.pathman_partition_list; \d+ test.r2; ALTER TABLE test.range_rel DETACH PARTITION test.r2; -SELECT * FROM pathman_partition_list; +SELECT * FROM pathman.pathman_partition_list; \d+ test.r2; DROP SCHEMA test CASCADE; From e426c71947478af6c5429f8db9ad85de3fd3198a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 26 Jun 2017 09:38:35 +0300 Subject: [PATCH 367/528] Enable compilation of declarative syntax only for pg10+ --- Makefile | 5 +++-- src/hooks.c | 8 ++++++++ src/include/declarative.h | 5 ----- src/planner_tree_modification.c | 2 ++ 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 17d241e5..4fbe4b19 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o src/declarative.o $(WIN32RES) + src/partition_overseer.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include @@ -67,7 +67,7 @@ EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output -DECL_CHECK_VERSIONS = 10beta1 +DECL_CHECK_VERSIONS = 10 11 ifdef USE_PGXS PG_CONFIG = pg_config @@ -75,6 +75,7 @@ PGXS := $(shell $(PG_CONFIG) --pgxs) VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) REGRESS += pathman_declarative + OBJS += src/declarative.o endif include $(PGXS) else diff --git a/src/hooks.c b/src/hooks.c index 4aa5bf40..cd49d60e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -818,7 +818,13 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) return; } +#if PG_VERSION_NUM >= 100000 + /* + * for now this call works only for declarative partitioning so + * we disabled it + */ pathman_post_analyze_query(query); +#endif } /* @@ -956,6 +962,7 @@ pathman_process_utility_hook(Node *first_arg, get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); } +#if PG_VERSION_NUM >= 100000 else if (is_pathman_related_partitioning_cmd(parsetree)) { /* we can handle all the partitioning commands */ @@ -981,6 +988,7 @@ pathman_process_utility_hook(Node *first_arg, } } } +#endif } /* Finally call process_utility_hook_next or standard_ProcessUtility */ diff --git a/src/include/declarative.h b/src/include/declarative.h index 56ce0ed7..b38eebaa 100644 --- a/src/include/declarative.h +++ 
b/src/include/declarative.h @@ -5,11 +5,6 @@ #include "nodes/nodes.h" #include "nodes/parsenodes.h" -typedef enum DeclarativeCommandType { - DP_ATTACH, /* ALTER TABLE .. ATTACH PARTITION */ - DP_DETACH /* ALTER TABLE .. DETACH PARTITION */ -} DeclarativeCommandType; - void modify_declative_partitioning_query(Query *query); bool is_pathman_related_partitioning_cmd(Node *parsetree); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d1412835..b3391bbf 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -429,7 +429,9 @@ pathman_post_analyze_query_walker(Node *node, void *context) Query *query = (Query *) node; /* Make changes for declarative syntax */ +#if PG_VERSION_NUM >= 100000 modify_declative_partitioning_query(query); +#endif /* Handle Query node */ return query_tree_walker(query, From 48df7a378d44555d90c593f287e89143a775164b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 26 Jun 2017 10:06:08 +0300 Subject: [PATCH 368/528] Fix Makefile --- Makefile | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 4fbe4b19..ed2d624b 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,19 @@ MODULE_big = pg_pathman +# versions of postgresql with declarative partitioning +DECL_CHECK_VERSIONS = 10 11 + +ifdef USE_PGXS +PG_CONFIG = pg_config +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') +ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) + EXTRA_REGRESS = pathman_declarative + EXTRA_OBJS = src/declarative.o +endif +endif +include $(PGXS) + OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ @@ -60,23 +73,15 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views - + pathman_views ${EXTRA_REGRESS} EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output -DECL_CHECK_VERSIONS = 10 11 - ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) -VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) - REGRESS += pathman_declarative - OBJS += src/declarative.o -endif include $(PGXS) else subdir = contrib/pg_pathman From e8521d31e875fd5929f91a09f2aab10909c1d361 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 27 Jun 2017 10:43:11 +0300 Subject: [PATCH 369/528] Add support for CREATE TABLE .. 
PARTITION OF --- expected/pathman_declarative.out | 33 ++++++++- sql/pathman_declarative.sql | 16 ++++- src/declarative.c | 119 +++++++++++++++++++++++++++---- src/hooks.c | 13 ++-- src/include/declarative.h | 7 +- 5 files changed, 164 insertions(+), 24 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index 9dc5cb93..e853898d 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -7,6 +7,10 @@ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL ); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: "range_rel" is not partitioned INSERT INTO test.range_rel (dt) SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; SELECT pathman.create_range_partitions('test.range_rel', 'dt', @@ -25,7 +29,12 @@ SELECT * FROM pathman.pathman_partition_list; test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 (4 rows) -CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions ALTER TABLE test.range_rel ATTACH PARTITION test.r2 FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); SELECT * FROM pathman.pathman_partition_list; @@ -66,7 +75,27 @@ SELECT * FROM pathman.pathman_partition_list; id | integer | | not null | | plain | | dt | date | | not null | | plain | | +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 8 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 4bb6b2b8..864e3af8 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -9,6 +9,9 @@ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL ); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); INSERT INTO test.range_rel (dt) SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; @@ -16,7 +19,10 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 
month'::INTERVAL); SELECT * FROM pathman.pathman_partition_list; -CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); ALTER TABLE test.range_rel ATTACH PARTITION test.r2 FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); SELECT * FROM pathman.pathman_partition_list; @@ -25,6 +31,14 @@ ALTER TABLE test.range_rel DETACH PARTITION test.r2; SELECT * FROM pathman.pathman_partition_list; \d+ test.r2; +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/src/declarative.c b/src/declarative.c index 0fdbf1a0..02d4a875 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -1,5 +1,6 @@ #include "declarative.h" #include "utils.h" +#include "partition_creation.h" #include "fmgr.h" #include "access/htup_details.h" @@ -57,7 +58,8 @@ modify_declative_partitioning_query(Query *query) } /* is it one of declarative partitioning commands? */ -bool is_pathman_related_partitioning_cmd(Node *parsetree) +bool +is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) { if (IsA(parsetree, AlterTableStmt)) { @@ -65,23 +67,28 @@ bool is_pathman_related_partitioning_cmd(Node *parsetree) AlterTableStmt *stmt = (AlterTableStmt *) parsetree; int cnt = 0; + *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + if (get_pathman_relation_info(*parent_relid) == NULL) + return false; + + /* + * Since cmds can contain multiple commmands but we can handle only + * two of them here, so we need to check that there are only commands + * we can handle. In case if cmds contain other commands we skip all + * commands in this statement. + */ foreach(lc, stmt->cmds) { AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); - int subtype = cmd->subtype; - - if (subtype < 0) - subtype = -subtype; - - switch (subtype) + switch (abs(cmd->subtype)) { case AT_AttachPartition: case AT_DetachPartition: /* - * we need to fix all subtypes, + * We need to fix all subtypes, * possibly we're not going to handle this */ - cmd->subtype = -(cmd->subtype); + cmd->subtype = abs(cmd->subtype); continue; default: cnt++; @@ -90,6 +97,26 @@ bool is_pathman_related_partitioning_cmd(Node *parsetree) return (cnt == 0); } + else if (IsA(parsetree, CreateStmt)) + { + /* inhRelations != NULL, partbound != NULL, tableElts == NULL */ + CreateStmt *stmt = (CreateStmt *) parsetree; + + if (stmt->inhRelations && stmt->partbound != NULL) + { + RangeVar *rv = castNode(RangeVar, linitial(stmt->inhRelations)); + *parent_relid = RangeVarGetRelid(rv, NoLock, false); + if (get_pathman_relation_info(*parent_relid) == NULL) + return false; + + if (stmt->tableElts != NIL) + elog(ERROR, "pg_pathman doesn't support column definitions " + "in declarative syntax yet"); + + return true; + + } + } return false; } @@ -157,10 +184,10 @@ transform_bound_value(ParseState *pstate, A_Const *con, } /* handle ALTER TABLE .. 
ATTACH PARTITION command */ -void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +void +handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) { - Oid parent_relid, - partition_relid, + Oid partition_relid, proc_args[] = { REGCLASSOID, REGCLASSOID, ANYELEMENTOID, ANYELEMENTOID }; @@ -181,7 +208,10 @@ void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) Assert(cmd->subtype == AT_AttachPartition); - parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + if (pcmd->bound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) elog(ERROR, "relation is not partitioned"); @@ -231,7 +261,8 @@ void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) } /* handle ALTER TABLE .. DETACH PARTITION command */ -void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +void +handle_detach_partition(AlterTableCmd *cmd) { List *proc_name; FmgrInfo proc_flinfo; @@ -262,3 +293,63 @@ void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) /* Invoke the callback */ FunctionCallInvoke(&proc_fcinfo); } + +/* handle CREATE TABLE .. PARTITION OF FOR VALUES FROM .. TO .. */ +void +handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) +{ + Bound start, + end; + const PartRelationInfo *prel; + ParseState *pstate = make_parsestate(NULL); + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + + /* we show errors earlier for these asserts */ + Assert(stmt->inhRelations != NULL); + Assert(stmt->tableElts == NIL); + + if (stmt->partbound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); + + if (prel->parttype != PT_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned by RANGE", + get_rel_name_or_relid(parent_relid)))); + + ldatum = (PartitionRangeDatum *) linitial(stmt->partbound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(stmt->partbound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + start = lval->constisnull? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(lval->constvalue); + + end = rval->constisnull? 
+			MakeBoundInf(PLUS_INFINITY) :
+			MakeBound(rval->constvalue);
+
+	/* more checks */
+	check_range_available(parent_relid, &start, &end, lval->consttype, true);
+
+	/* Create a new RANGE partition and return its Oid */
+	create_single_range_partition_internal(parent_relid,
+										   &start,
+										   &end,
+										   lval->consttype,
+										   stmt->relation,
+										   stmt->tablespacename);
+}
diff --git a/src/hooks.c b/src/hooks.c
index cd49d60e..b38d71e3 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -963,9 +963,9 @@ pathman_process_utility_hook(Node *first_arg,
 								get_rel_name(relation_oid))));
 		}
 #if PG_VERSION_NUM >= 100000
-		else if (is_pathman_related_partitioning_cmd(parsetree))
+		else if (is_pathman_related_partitioning_cmd(parsetree, &relation_oid))
 		{
-			/* we can handle all the partitioning commands */
+			/* we can handle all the partitioning commands in ALTER .. TABLE */
 			if (IsA(parsetree, AlterTableStmt))
 			{
 				ListCell	   *lc;
@@ -977,16 +977,21 @@ pathman_process_utility_hook(Node *first_arg,
 					switch (cmd->subtype)
 					{
 						case AT_AttachPartition:
-							handle_attach_partition(stmt, cmd);
+							handle_attach_partition(relation_oid, cmd);
 							return;
 						case AT_DetachPartition:
-							handle_detach_partition(stmt, cmd);
+							handle_detach_partition(cmd);
 							return;
 						default:
 							elog(ERROR, "can't handle this command");
 					}
 				}
 			}
+			else if (IsA(parsetree, CreateStmt))
+			{
+				handle_create_partition_of(relation_oid, (CreateStmt *) parsetree);
+				return;
+			}
 		}
 #endif
 }
diff --git a/src/include/declarative.h b/src/include/declarative.h
index b38eebaa..05993c79 100644
--- a/src/include/declarative.h
+++ b/src/include/declarative.h
@@ -6,10 +6,11 @@
 #include "nodes/parsenodes.h"
 
 void modify_declative_partitioning_query(Query *query);
-bool is_pathman_related_partitioning_cmd(Node *parsetree);
+bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid);
 
 /* actual actions */
-void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd);
-void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd);
+void handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd);
+void handle_detach_partition(AlterTableCmd *cmd);
+void handle_create_partition_of(Oid parent_relid, CreateStmt *stmt);
 
 #endif /* DECLARATIVE_H */
From 61f5c8067383abd1da9035a50d89ec354297b8eb Mon Sep 17 00:00:00 2001
From: Ildus K
Date: Tue, 27 Jun 2017 11:33:50 +0300
Subject: [PATCH 370/528] Add compatibility with REL_10_beta1 branch

---
 src/declarative.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/src/declarative.c b/src/declarative.c
index 02d4a875..7517d82d 100644
--- a/src/declarative.c
+++ b/src/declarative.c
@@ -204,11 +204,14 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd)
 	ParseState		   *pstate = make_parsestate(NULL);
 	const PartRelationInfo *prel;
 
-	PartitionCmd *pcmd	= (PartitionCmd *) cmd->def;
+	PartitionCmd	   *pcmd = (PartitionCmd *) cmd->def;
+
+	/* in 10beta1, PartitionCmd->bound is (Node *) */
+	PartitionBoundSpec *bound = (PartitionBoundSpec *) pcmd->bound;
 
 	Assert(cmd->subtype == AT_AttachPartition);
 
-	if (pcmd->bound->strategy != PARTITION_STRATEGY_RANGE)
+	if (bound->strategy != PARTITION_STRATEGY_RANGE)
 		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						errmsg("pg_pathman only supports queries for range partitions")));
 
@@ -224,11 +227,15 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd)
 	proc_name = list_make2(makeString(pathman_schema),
 						   makeString(CppAsString(attach_range_partition)));
 
-	ldatum = (PartitionRangeDatum *) linitial(pcmd->bound->lowerdatums);
+	if 
((!list_length(bound->lowerdatums)) || + (!list_length(bound->upperdatums))) + elog(ERROR, "provide start and end value for range partition"); + + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); con = castNode(A_Const, ldatum->value); lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); - rdatum = (PartitionRangeDatum *) linitial(pcmd->bound->upperdatums); + rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); @@ -247,10 +254,6 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) proc_fcinfo.flinfo->fn_expr = (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); - if ((!list_length(pcmd->bound->lowerdatums)) || - (!list_length(pcmd->bound->upperdatums))) - elog(ERROR, "provide start and end value for range partition"); - proc_fcinfo.arg[2] = lval->constvalue; proc_fcinfo.argnull[2] = ldatum->infinite || lval->constisnull; proc_fcinfo.arg[3] = rval->constvalue; @@ -308,11 +311,14 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) *rval; A_Const *con; + /* in 10beta1, PartitionCmd->bound is (Node *) */ + PartitionBoundSpec *bound = (PartitionBoundSpec *) stmt->partbound; + /* we show errors earlier for these asserts */ Assert(stmt->inhRelations != NULL); Assert(stmt->tableElts == NIL); - if (stmt->partbound->strategy != PARTITION_STRATEGY_RANGE) + if (bound->strategy != PARTITION_STRATEGY_RANGE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_pathman only supports queries for range partitions"))); @@ -326,11 +332,11 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) errmsg("table \"%s\" is not partitioned by RANGE", get_rel_name_or_relid(parent_relid)))); - ldatum = (PartitionRangeDatum *) linitial(stmt->partbound->lowerdatums); + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); con = castNode(A_Const, ldatum->value); lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); - rdatum = (PartitionRangeDatum *) linitial(stmt->partbound->upperdatums); + rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); From 740239fa40adf18f9dfc9b48109ed330577e563d Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 4 Oct 2018 19:06:52 +0300 Subject: [PATCH 371/528] Fix declarative syntax for pg10 --- Makefile | 10 ++++----- expected/pathman_declarative.out | 1 - src/declarative.c | 37 +++++++++++++++++++++----------- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile index ed2d624b..e0cf5197 100644 --- a/Makefile +++ b/Makefile @@ -2,13 +2,10 @@ MODULE_big = pg_pathman -# versions of postgresql with declarative partitioning -DECL_CHECK_VERSIONS = 10 11 - ifdef USE_PGXS PG_CONFIG = pg_config VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) +ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) EXTRA_REGRESS = pathman_declarative EXTRA_OBJS = src/declarative.o endif @@ -21,7 +18,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o $(WIN32RES) + 
src/partition_overseer.o $(EXTRA_OBJS) $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include @@ -73,7 +70,8 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views ${EXTRA_REGRESS} + pathman_views $(EXTRA_REGRESS) + EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index e853898d..011a0f71 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -58,7 +58,6 @@ Check constraints: Inherits: test.range_rel ALTER TABLE test.range_rel DETACH PARTITION test.r2; -NOTICE: trigger "range_rel_upd_trig" for relation "test.r2" does not exist, skipping SELECT * FROM pathman.pathman_partition_list; parent | partition | parttype | expr | range_min | range_max ----------------+------------------+----------+------+------------+------------ diff --git a/src/declarative.c b/src/declarative.c index 7517d82d..891efd62 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -1,19 +1,22 @@ +#include "pathman.h" #include "declarative.h" #include "utils.h" #include "partition_creation.h" -#include "fmgr.h" #include "access/htup_details.h" #include "catalog/namespace.h" -#include "catalog/pg_type.h" #include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "fmgr.h" +#include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" -#include "parser/parse_func.h" +#include "optimizer/planner.h" #include "parser/parse_coerce.h" -#include "utils/int8.h" -#include "utils/lsyscache.h" +#include "parser/parse_func.h" #include "utils/builtins.h" #include "utils/int8.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/varbit.h" @@ -33,13 +36,16 @@ modify_declative_partitioning_query(Query *query) if (IsA(query->utilityStmt, AlterTableStmt)) { + PartRelationInfo *prel; ListCell *lcmd; Oid relid; AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; relid = RangeVarGetRelid(stmt->relation, NoLock, true); - if (get_pathman_relation_info(relid) != NULL) + if ((prel = get_pathman_relation_info(relid)) != NULL) { + close_pathman_relation_info(prel); + foreach(lcmd, stmt->cmds) { AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); @@ -61,6 +67,8 @@ modify_declative_partitioning_query(Query *query) bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) { + PartRelationInfo *prel; + if (IsA(parsetree, AlterTableStmt)) { ListCell *lc; @@ -68,9 +76,11 @@ is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) int cnt = 0; *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); - if (get_pathman_relation_info(*parent_relid) == NULL) + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) return false; + close_pathman_relation_info(prel); + /* * Since cmds can contain multiple commmands but we can handle only * two of them here, so we need to check that there are only commands @@ -106,9 +116,10 @@ is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) { RangeVar *rv = castNode(RangeVar, linitial(stmt->inhRelations)); *parent_relid = RangeVarGetRelid(rv, NoLock, false); - if (get_pathman_relation_info(*parent_relid) == NULL) + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) return false; + close_pathman_relation_info(prel); if (stmt->tableElts != NIL) elog(ERROR, "pg_pathman doesn't support column definitions " "in declarative syntax yet"); @@ -202,7 
+213,7 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) A_Const *con; List *fn_args; ParseState *pstate = make_parsestate(NULL); - const PartRelationInfo *prel; + PartRelationInfo *prel; PartitionCmd *pcmd = (PartitionCmd *) cmd->def; @@ -238,6 +249,7 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); /* Lookup function's Oid and get FmgrInfo */ fmgr_info(LookupFuncName(proc_name, 4, proc_args, false), &proc_flinfo); @@ -255,9 +267,9 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); proc_fcinfo.arg[2] = lval->constvalue; - proc_fcinfo.argnull[2] = ldatum->infinite || lval->constisnull; + proc_fcinfo.argnull[2] = lval->constisnull; proc_fcinfo.arg[3] = rval->constvalue; - proc_fcinfo.argnull[3] = rdatum->infinite || rval->constisnull; + proc_fcinfo.argnull[3] = rval->constisnull; /* Invoke the callback */ FunctionCallInvoke(&proc_fcinfo); @@ -303,7 +315,7 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) { Bound start, end; - const PartRelationInfo *prel; + PartRelationInfo *prel; ParseState *pstate = make_parsestate(NULL); PartitionRangeDatum *ldatum, *rdatum; @@ -339,6 +351,7 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); start = lval->constisnull? MakeBoundInf(MINUS_INFINITY) : From e06bbc73bcb4d98eb28cee11a4e742c1055efe3d Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 4 Oct 2018 19:11:10 +0300 Subject: [PATCH 372/528] Fix declarative test on pg11 --- expected/pathman_declarative_1.out | 100 +++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 expected/pathman_declarative_1.out diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out new file mode 100644 index 00000000..8ef4e556 --- /dev/null +++ b/expected/pathman_declarative_1.out @@ -0,0 +1,100 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: table "range_rel" is not partitioned +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +ALTER TABLE test.range_rel ATTACH 
PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 8 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; From afdf2f5c6803c71d825f491812c487f731a221ae Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 4 Oct 2018 19:30:46 +0300 Subject: [PATCH 373/528] Add documentation for declarative 
partitioning

---
 README.md | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/README.md b/README.md
index 2bf95a2e..1a33e01a 100644
--- a/README.md
+++ b/README.md
@@ -70,6 +70,7 @@ More interesting features are yet to come. Stay tuned!
  * Non-blocking [concurrent table partitioning](#data-migration);
  * FDW support (foreign partitions);
  * Various [GUC](#disabling-pg_pathman) toggles and configurable settings.
+ * Partial support of [`declarative partitioning`](#declarative-partitioning) (from PostgreSQL 10).
 
 ## Installation guide
 To install `pg_pathman`, execute this in the module's directory:
@@ -410,6 +411,26 @@ AS SELECT * FROM @extschema@.show_cache_stats();
 ```
 Shows memory consumption of various caches.
 
+## Declarative partitioning
+
+From PostgreSQL 10 `ATTACH PARTITION`, `DETACH PARTITION`
+and `CREATE TABLE .. PARTITION OF` commands could be with with tables
+partitioned by `pg_pathman`:
+
+```plpgsql
+CREATE TABLE child1 (LIKE partitioned_table);
+
+--- attach new partition
+ALTER TABLE partitioned_table ATTACH PARTITION child1
+	FOR VALUES FROM ('2015-05-01') TO ('2015-06-01');
+
+--- detach the partition
+ALTER TABLE partitioned_table DETACH PARTITION child1;
+
+-- create a partition (only RANGE bounds are supported, see the tests above)
+CREATE TABLE child2 PARTITION OF partitioned_table
+	FOR VALUES FROM ('2015-05-01') TO ('2015-06-01');
+```
 
 ## Custom plan nodes
 `pg_pathman` provides a couple of [custom plan nodes](https://wiki.postgresql.org/wiki/CustomScanAPI) which aim to reduce execution time, namely:
From a6bd7b4838c495a58ff4d8b958f011dbc7a5a19f Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Mon, 8 Oct 2018 17:39:10 +0300
Subject: [PATCH 374/528] Fix typo in README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 1a33e01a..684e37c4 100644
--- a/README.md
+++ b/README.md
@@ -414,7 +414,7 @@ Shows memory consumption of various caches.
 ## Declarative partitioning
 
 From PostgreSQL 10 `ATTACH PARTITION`, `DETACH PARTITION`
-and `CREATE TABLE .. PARTITION OF` commands could be with with tables
+and `CREATE TABLE .. 
PARTITION OF` commands could be used with tables partitioned by `pg_pathman`: ```plpgsql From b33eb4df1bd617c6c06b4446bd260a12d8aef1bb Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 8 Oct 2018 17:46:52 +0300 Subject: [PATCH 375/528] Bump version to 1.5.2 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 744310c2..90e38663 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.1", + "version": "1.5.2", "maintainer": [ "Dmitry Ivanov ", "Ildus Kurbangaliev " @@ -23,7 +23,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.1", + "version": "1.5.2", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 6b73351e..eadb9a70 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.1 + 1.5.2 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 1c8c1584..3c959a78 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.1" +#define CURRENT_LIB_VERSION "1.5.2" void *pathman_cache_search_relid(HTAB *cache_table, From 8a1d81324d741395b8f5964a40370a5b8f374a9c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 8 Oct 2018 18:57:20 +0300 Subject: [PATCH 376/528] Make get_partition_cooked_key work for tables with no partitions. --- src/pl_funcs.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 44a5f93f..ea718752 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -142,22 +142,33 @@ get_partition_key_type_pl(PG_FUNCTION_ARGS) } /* - * Return partition key type. + * Return cooked partition key. 
*/ Datum get_partition_cooked_key_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - PartRelationInfo *prel; - Datum res; + /* Values extracted from PATHMAN_CONFIG */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + Oid relid = PG_GETARG_OID(0); + char *expr_cstr; + Node *expr; + char *cooked_cstr; + + /* Check that table is registered in PATHMAN_CONFIG */ + if (!pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); - prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_ANY); + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + expr = cook_partitioning_expression(relid, expr_cstr, NULL); + cooked_cstr = nodeToString(expr); - res = CStringGetTextDatum(nodeToString(prel->expr)); - close_pathman_relation_info(prel); + pfree(expr_cstr); + pfree(expr); - PG_RETURN_TEXT_P(res); + PG_RETURN_TEXT_P(CStringGetTextDatum(cooked_cstr)); } /* From 497ab20a2cb119278fca0c68c3c1b7140bdc31d9 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 8 Oct 2018 20:10:38 +0300 Subject: [PATCH 377/528] Return private function for cached cooked key to fix tests coverage --- expected/pathman_column_type.out | 23 +++++++++++++++++++++-- sql/pathman_column_type.sql | 15 +++++++++++++-- src/pl_funcs.c | 21 +++++++++++++++++++++ 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index d3022d77..3ae9355c 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -28,21 +28,33 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; partition status cache | 3 (4 rows) -/* change column's type (should flush caches) */ +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); get_partition_cooked_key ----------------------------------------------------------------------------------------------------------------------- {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + SELECT get_partition_key_type('test_column_type.test'::REGCLASS); get_partition_key_type ------------------------ integer (1 row) +/* change column's type (should also flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that expression has been built */ +/* check that correct expression has been built */ SELECT get_partition_key_type('test_column_type.test'::REGCLASS); get_partition_key_type ------------------------ @@ -55,6 +67,13 @@ SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; val diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index ab2b43f1..98c73908 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -17,14 +17,25 @@ SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); SELECT * FROM test_column_type.test; SELECT context, entries FROM pathman_cache_stats ORDER BY context; -/* change column's type (should flush caches) */ +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; + SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + +/* change column's type (should also flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that expression has been built */ +/* check that correct expression has been built */ SELECT get_partition_key_type('test_column_type.test'::REGCLASS); SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ea718752..26d5af8c 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -48,6 +48,7 @@ PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); PG_FUNCTION_INFO_V1( get_partition_key_type_pl ); PG_FUNCTION_INFO_V1( get_partition_cooked_key_pl ); +PG_FUNCTION_INFO_V1( get_cached_partition_cooked_key_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); @@ -171,6 +172,26 @@ get_partition_cooked_key_pl(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(CStringGetTextDatum(cooked_cstr)); } +/* + * Return cached cooked partition key. + * + * Used in tests for invalidation. + */ +Datum +get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + PartRelationInfo *prel; + Datum res; + + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_ANY); + res = CStringGetTextDatum(nodeToString(prel->expr)); + close_pathman_relation_info(prel); + + PG_RETURN_TEXT_P(res); +} + /* * Extract basic type of a domain. 
*/ From 085f2362d3f348404f1160eead053d431406a19a Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 9 Oct 2018 15:32:23 +0300 Subject: [PATCH 378/528] Fix build errors and tests with our standard and enterprise versions --- Makefile | 39 +++++++++++++++++++++------------ README.md | 2 +- sql/pathman_hashjoin.sql | 1 + sql/pathman_mergejoin.sql | 1 - src/declarative.c | 2 +- src/hooks.c | 2 +- src/include/declarative.h | 2 +- src/planner_tree_modification.c | 4 ++-- 8 files changed, 32 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index e0cf5197..c6e800a7 100644 --- a/Makefile +++ b/Makefile @@ -2,23 +2,13 @@ MODULE_big = pg_pathman -ifdef USE_PGXS -PG_CONFIG = pg_config -VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) - EXTRA_REGRESS = pathman_declarative - EXTRA_OBJS = src/declarative.o -endif -endif -include $(PGXS) - OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o $(EXTRA_OBJS) $(WIN32RES) + src/partition_overseer.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include @@ -70,7 +60,7 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views $(EXTRA_REGRESS) + pathman_views EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add @@ -78,16 +68,37 @@ EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output ifdef USE_PGXS -PG_CONFIG = pg_config +PG_CONFIG=pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') else subdir = contrib/pg_pathman top_builddir = ../.. include $(top_builddir)/src/Makefile.global +endif + +# our standard version could also use declarative syntax +ifdef PGPRO_EDITION +ifeq ($(PGPRO_EDITION),standard) +VNUM := $(VERSION) +endif +endif + +ifdef VNUM +ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) +REGRESS += pathman_declarative +OBJS += src/declarative.o +override PG_CPPFLAGS += -DENABLE_DECLARATIVE +endif +endif + +ifdef USE_PGXS +include $(PGXS) +else include $(top_srcdir)/contrib/contrib-global.mk endif + $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ diff --git a/README.md b/README.md index 684e37c4..4fb8e5ac 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: * PostgreSQL 9.5, 9.6, 10, 11; - * Postgres Pro Standard 9.5, 9.6; + * Postgres Pro Standard 9.5, 9.6, 10; * Postgres Pro Enterprise; Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). 
diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index d3cc1b2b..411e0a7f 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -33,6 +33,7 @@ SET enable_seqscan = OFF; SET enable_nestloop = OFF; SET enable_hashjoin = ON; SET enable_mergejoin = OFF; + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 90bf3166..9b0b95b1 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -44,7 +44,6 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; - DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman; DROP SCHEMA pathman CASCADE; diff --git a/src/declarative.c b/src/declarative.c index 891efd62..ca4fe165 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -29,7 +29,7 @@ * we set it back (look `is_pathman_related_partitioning_cmd`) */ void -modify_declative_partitioning_query(Query *query) +modify_declarative_partitioning_query(Query *query) { if (query->commandType != CMD_UTILITY) return; diff --git a/src/hooks.c b/src/hooks.c index b38d71e3..656efe9f 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -962,7 +962,7 @@ pathman_process_utility_hook(Node *first_arg, get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); } -#if PG_VERSION_NUM >= 100000 +#ifdef ENABLE_DECLARATIVE else if (is_pathman_related_partitioning_cmd(parsetree, &relation_oid)) { /* we can handle all the partitioning commands in ALTER .. TABLE */ diff --git a/src/include/declarative.h b/src/include/declarative.h index 05993c79..ee4ea40b 100644 --- a/src/include/declarative.h +++ b/src/include/declarative.h @@ -5,7 +5,7 @@ #include "nodes/nodes.h" #include "nodes/parsenodes.h" -void modify_declative_partitioning_query(Query *query); +void modify_declarative_partitioning_query(Query *query); bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid); /* actual actions */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index b3391bbf..f40c152f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -429,8 +429,8 @@ pathman_post_analyze_query_walker(Node *node, void *context) Query *query = (Query *) node; /* Make changes for declarative syntax */ -#if PG_VERSION_NUM >= 100000 - modify_declative_partitioning_query(query); +#ifdef ENABLE_DECLARATIVE + modify_declarative_partitioning_query(query); #endif /* Handle Query node */ From dd072e560148ada0fb267443ff9b8b9dc5b4108f Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 9 Oct 2018 15:32:41 +0300 Subject: [PATCH 379/528] Add forgotten files --- expected/pathman_hashjoin_2.out | 66 ++++++++++++++++++++++++++++++++ expected/pathman_mergejoin_2.out | 65 +++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 expected/pathman_hashjoin_2.out create mode 100644 expected/pathman_mergejoin_2.out diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out new file mode 100644 index 00000000..d0cba65d --- /dev/null +++ b/expected/pathman_hashjoin_2.out @@ -0,0 +1,66 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM 
generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(13 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out new file mode 100644 index 00000000..acff2247 --- /dev/null +++ b/expected/pathman_mergejoin_2.out @@ -0,0 +1,65 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey 
on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(13 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; From dd71813ae159e9f9c677adca7d7aaf4058b6f711 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Wed, 10 Oct 2018 01:10:28 +0300 Subject: [PATCH 380/528] Mind that build_check_constraint_name returns quoted name. --- hash.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hash.sql b/hash.sql index 8cf9b19a..0f694882 100644 --- a/hash.sql +++ b/hash.sql @@ -108,7 +108,7 @@ BEGIN /* Fetch definition of old_partition's HASH constraint */ SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint - WHERE conrelid = old_partition AND conname = old_constr_name + WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name INTO old_constr_def; /* Detach old partition */ From 8fe15fc6e9470cf0801510c4361266768df31d25 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 10 Oct 2018 12:54:35 +0300 Subject: [PATCH 381/528] Disable declarative syntax for in-tree builds --- Makefile | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index c6e800a7..80f74e7f 100644 --- a/Makefile +++ b/Makefile @@ -71,34 +71,22 @@ ifdef USE_PGXS PG_CONFIG=pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -else -subdir = contrib/pg_pathman -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -endif - -# our standard version could also use declarative syntax -ifdef PGPRO_EDITION -ifeq ($(PGPRO_EDITION),standard) -VNUM := $(VERSION) -endif -endif -ifdef VNUM +# check for declarative syntax ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) REGRESS += pathman_declarative OBJS += src/declarative.o override PG_CPPFLAGS += -DENABLE_DECLARATIVE endif -endif -ifdef USE_PGXS include $(PGXS) else +subdir = contrib/pg_pathman +top_builddir = ../.. 
+include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif - $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ From 02e0db7650d48bdc5b25e6176d29ebaa34e5a921 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 19 Nov 2018 18:46:28 +0300 Subject: [PATCH 382/528] Check readiness in add_to_pathman_config --- expected/pathman_calamity.out | 2 +- src/pl_funcs.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index eadb9a70..08beae66 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1005,7 +1005,7 @@ SHOW pg_pathman.enable; (1 row) SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ -ERROR: pg_pathman is not initialized yet +ERROR: pg_pathman is disabled SELECT * FROM pathman_partition_list; /* not ok */ ERROR: pg_pathman is not initialized yet SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 26d5af8c..06b1cf56 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -740,6 +740,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) PathmanInitState init_state; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + if (!PG_ARGISNULL(0)) { relid = PG_GETARG_OID(0); From 6c9d435f155453ede59251a36c10a5a6703e8666 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 17 Dec 2018 12:21:34 +0300 Subject: [PATCH 383/528] Bump version to 1.5.3 --- META.json | 5 ++--- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/META.json b/META.json index 90e38663..510d6082 100644 --- a/META.json +++ b/META.json @@ -2,9 +2,8 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.2", + "version": "1.5.3", "maintainer": [ - "Dmitry Ivanov ", "Ildus Kurbangaliev " ], "license": "postgresql", @@ -23,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.2", + "version": "1.5.3", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 08beae66..ea6827ff 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.2 + 1.5.3 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 3c959a78..4ec6fbe3 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.2" +#define CURRENT_LIB_VERSION "1.5.3" void *pathman_cache_search_relid(HTAB *cache_table, From d1032b0f012a250efca1ede699434a4df93b4608 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 18 Jan 2019 12:40:57 +0300 Subject: [PATCH 384/528] [PGPRO-2355] Check pathman readiness in split and merge range parts. 
--- src/pl_range_funcs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 351926f7..daf6cf4c 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -489,6 +489,9 @@ split_range_partition(PG_FUNCTION_ARGS) char *query; int i; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + if (!PG_ARGISNULL(0)) { partition1 = PG_GETARG_OID(0); @@ -652,6 +655,9 @@ merge_range_partitions(PG_FUNCTION_ARGS) FmgrInfo finfo; int i; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + /* Validate array type */ Assert(ARR_ELEMTYPE(arr) == REGCLASSOID); From 16810d9211e8e0e4a38bf8fc0d73d07b745ab5a8 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 1 Feb 2019 16:06:23 +0300 Subject: [PATCH 385/528] Revisit pathman readiness again. Most pathman functions break if pathman is disabled: earlier I put defense checks in split and merge range partitions, now it is drop_range_partition_expand_next. Looks like the reason is that pathman caches are not being invalidated if it is disabled: pathman_relcache_hook exits right away then. This is kinda reasonable: if we want to disable pathman completely, why maintain the caches? So this time try to bury the readiness check deeper, in get_pathman_relation_info itself. BTW, pathman caches are not dropped when it is disabled, which looks suspicious on its own -- probably if we re-enable it later, caches might be inconsistent. --- expected/pathman_calamity.out | 2 +- src/pl_range_funcs.c | 6 ------ src/relation_info.c | 3 +++ 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ea6827ff..bef99948 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1009,7 +1009,7 @@ ERROR: pg_pathman is disabled SELECT * FROM pathman_partition_list; /* not ok */ ERROR: pg_pathman is not initialized yet SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ -ERROR: pg_pathman is not initialized yet +ERROR: pg_pathman is disabled EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ QUERY PLAN ------------------------------ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index daf6cf4c..351926f7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -489,9 +489,6 @@ split_range_partition(PG_FUNCTION_ARGS) char *query; int i; - if (!IsPathmanReady()) - elog(ERROR, "pg_pathman is disabled"); - if (!PG_ARGISNULL(0)) { partition1 = PG_GETARG_OID(0); @@ -655,9 +652,6 @@ merge_range_partitions(PG_FUNCTION_ARGS) FmgrInfo finfo; int i; - if (!IsPathmanReady()) - elog(ERROR, "pg_pathman is disabled"); - /* Validate array type */ Assert(ARR_ELEMTYPE(arr) == REGCLASSOID); diff --git a/src/relation_info.c b/src/relation_info.c index 8ee74217..9bb8d0db 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -320,6 +320,9 @@ get_pathman_relation_info(Oid relid) { PartStatusInfo *psin; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + /* Should always be called in transaction */ Assert(IsTransactionState()); From 64992326d83b75d031827002a8ef5ae7b8c28ff5 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 5 Feb 2019 15:39:54 +0300 Subject: [PATCH 386/528] Bump 1.5.4 lib version. 
---
 META.json | 6 +++---
 expected/pathman_calamity.out | 2 +-
 src/include/init.h | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/META.json b/META.json
index 510d6082..9321b82c 100644
--- a/META.json
+++ b/META.json
@@ -2,9 +2,9 @@
 	"name": "pg_pathman",
 	"abstract": "Fast partitioning tool for PostgreSQL",
 	"description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.",
-	"version": "1.5.3",
+	"version": "1.5.4",
 	"maintainer": [
-		"Ildus Kurbangaliev "
+		"Arseny Sher "
 	],
 	"license": "postgresql",
 	"resources": {
@@ -22,7 +22,7 @@
 		"pg_pathman": {
 			"file": "pg_pathman--1.5.sql",
 			"docfile": "README.md",
-			"version": "1.5.3",
+			"version": "1.5.4",
 			"abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher"
 		}
 	},
diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out
index bef99948..44f14d96 100644
--- a/expected/pathman_calamity.out
+++ b/expected/pathman_calamity.out
@@ -13,7 +13,7 @@ SELECT debug_capture();
 SELECT pathman_version();
  pathman_version
 -----------------
- 1.5.3
+ 1.5.4
 (1 row)
 
 set client_min_messages = NOTICE;
diff --git a/src/include/init.h b/src/include/init.h
index 4ec6fbe3..5b133d01 100644
--- a/src/include/init.h
+++ b/src/include/init.h
@@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt)
 #define LOWEST_COMPATIBLE_FRONT "1.5.0"
 
 /* Current version of native C library */
-#define CURRENT_LIB_VERSION "1.5.3"
+#define CURRENT_LIB_VERSION "1.5.4"
 
 
 void *pathman_cache_search_relid(HTAB *cache_table,
From 10e6c71f9b870ba2fba59bacd49bb6cc7e8ecb4a Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Tue, 5 Feb 2019 15:40:19 +0300
Subject: [PATCH 387/528] Fix upgrade from 1.4 to 1.5.

This upgrade drops a column from pathman_config. This is problematic, because
the pg_attribute entry is never actually removed in Postgres, so a fresh
install and an upgraded one had different numbers of attrs. To avoid bothering
with this, recreate pathman_config during upgrade from scratch.

To test this, rewrite check_update.py, which was outright broken; now it runs
a large part of the regression tests. Also, the test script revealed that the
update script hadn't included the dd71813ae1 fix for replace_hash_partition.

---
 pg_pathman--1.4--1.5.sql | 129 +++++++++++++++++++
 tests/update/README.md | 6 +
 tests/update/check_update.py | 243 ++++++++++++++++++++++-------------
 3 files changed, 287 insertions(+), 91 deletions(-)

diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql
index a8e7fb21..11406476 100644
--- a/pg_pathman--1.4--1.5.sql
+++ b/pg_pathman--1.4--1.5.sql
@@ -11,6 +11,41 @@ RETURNS BOOL AS 'pg_pathman', 'validate_interval_value'
 LANGUAGE C;
 
 ALTER TABLE @extschema@.pathman_config DROP COLUMN cooked_expr;
 
+/*
+ * Dropped columns are never actually purged, entry in pg_attribute remains.
+ * Since dealing with different number of attrs in C code is cumbersome,
+ * let's recreate table instead.
+ */
+CREATE TABLE @extschema@.pathman_config_tmp (LIKE @extschema@.pathman_config INCLUDING ALL);
+INSERT INTO @extschema@.pathman_config_tmp SELECT * FROM @extschema@.pathman_config;
+ALTER EVENT TRIGGER pathman_ddl_trigger DISABLE;
+DROP TABLE @extschema@.pathman_config;
+ALTER TABLE @extschema@.pathman_config_tmp RENAME TO pathman_config;
+ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE;
+
+/*
+ * Get back stuff not preserved by CREATE TABLE LIKE: ACL, RLS and
+ * pg_extension_config_dump mark. 
+ */ + +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config +TO public; + +/* + * Row security policy to restrict partitioning operations to owner and superusers only + */ +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL USING (check_security_policy(partrel)); +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; + +/* + * Enable dump of config tables with pg_dump. + */ +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); + + ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(partrel, expr, @@ -505,6 +540,100 @@ BEGIN END $$ LANGUAGE plpgsql; +/* + * Replace hash partition with another one. It could be useful in case when + * someone wants to attach foreign table as a partition. + * + * lock_parent - should we take an exclusive lock? + */ +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS $$ +DECLARE + parent_relid REGCLASS; + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Check that table is partitioned */ + IF @extschema@.get_partition_key(parent_relid) IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + 
@extschema@.build_check_constraint_name(new_partition::REGCLASS), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + RETURN new_partition; +END +$$ LANGUAGE plpgsql; + /* * Disable pathman partitioning for specified relation. */ diff --git a/tests/update/README.md b/tests/update/README.md index f31f4116..fd042822 100644 --- a/tests/update/README.md +++ b/tests/update/README.md @@ -9,3 +9,9 @@ PG_CONFIG=... ./dump_pathman_objects %DBNAME% diff file_1 file_2 ``` + +check_update.py script tries to verify that update is ok automatically. For +instance, +```bash +tests/update/check_update.py d34a77e worktree +``` diff --git a/tests/update/check_update.py b/tests/update/check_update.py index be5f2aa2..9ac4db62 100755 --- a/tests/update/check_update.py +++ b/tests/update/check_update.py @@ -8,17 +8,12 @@ import argparse import testgres import subprocess -import difflib +import time my_dir = os.path.dirname(os.path.abspath(__file__)) repo_dir = os.path.abspath(os.path.join(my_dir, '../../')) print(repo_dir) -compilation = ''' -make USE_PGXS=1 clean -make USE_PGXS=1 install -''' - # just bunch of tables to create run_sql = ''' CREATE EXTENSION pg_pathman; @@ -132,95 +127,161 @@ SELECT create_hash_partitions('hash_rel_next1', 'value', 3); ''' -@contextlib.contextmanager -def cwd(path): - print("cwd: ", path) - curdir = os.getcwd() - os.chdir(path) - - try: - yield - finally: - print("cwd:", curdir) - os.chdir(curdir) - def shell(cmd): print(cmd) - subprocess.check_output(cmd, shell=True) + cp = subprocess.run(cmd, shell=True) + if cp.returncode != 0: + raise subprocess.CalledProcessError(cp.returncode, cmd) + # print(subprocess.check_output(cmd, shell=True).decode("utf-8")) -dump1_file = '/tmp/dump1.sql' -dump2_file = '/tmp/dump2.sql' +def shell_call(cmd): + print(cmd) + return subprocess.run(cmd, shell=True) + +def reinstall_pathman(tmp_pathman_path, revision): + if revision == 'worktree': + shutil.rmtree(tmp_pathman_path) + shutil.copytree(repo_dir, tmp_pathman_path) + os.chdir(tmp_pathman_path) + else: + os.chdir(tmp_pathman_path) + shell("git clean -fdx") + shell("git reset --hard") + shell("git checkout %s" % revision) + shell('make USE_PGXS=1 clean && make USE_PGXS=1 install -j4') if __name__ == '__main__': - parser = argparse.ArgumentParser(description='pg_pathman update checker') + parser = argparse.ArgumentParser(description=''' + pg_pathman update checker. Testgres is used. Junks into /tmp/pathman_check_update. + First do some partitioned stuff on new version. Save full database dump to + dump_new.sql and pathman object definitions to pathman_objects_new.sql. + Then run old version, do the same stuff. Upgrade and make dumps. Ensure + dumps are the same. Finally, run regressions tests on upgraded version. + ''') parser.add_argument('branches', nargs=2, - help='specify branches ("main rel_1.5")') - + help='specify branches , e.g. "d34a77e master". 
Special value "worktree" means, well, working tree.') args = parser.parse_args() - - with open(os.path.join(my_dir, 'dump_pathman_objects.sql'), 'r') as f: - dump_sql = f.read() - - shutil.rmtree('/tmp/pg_pathman') - shutil.copytree(repo_dir, '/tmp/pg_pathman') - - with cwd('/tmp/pg_pathman'): - shell("git clean -fdx") - shell("git reset --hard") - shell("git checkout %s" % args.branches[0]) - shell(compilation) - - with testgres.get_new_node('updated') as node: - node.init() - node.append_conf("shared_preload_libraries='pg_pathman'\n") - - node.start() - node.safe_psql('postgres', run_sql) - node.dump(dump1_file, 'postgres') - node.stop() - - shell("git clean -fdx") - shell("git checkout %s" % args.branches[1]) - shell(compilation) - - version = None - with open('pg_pathman.control') as f: - for line in f.readlines(): - if line.startswith('default_version'): - version = line.split('=')[1].strip() - - if version is None: - print("cound not find version in second branch") - exit(1) - - node.start() - p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - dumped_objects_old = p.communicate(input=dump_sql.encode())[0].decode() - node.stop() - - # now make clean install - with testgres.get_new_node('from_scratch') as node: - node.init() - node.append_conf("shared_preload_libraries='pg_pathman'\n") - node.start() - node.safe_psql('postgres', run_sql) - p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - dumped_objects_new = p.communicate(input=dump_sql.encode())[0].decode() - node.dump(dump2_file, 'postgres') - - # check dumps - node.safe_psql('postgres', 'create database d1') - node.restore(dump1_file, 'd1') - - node.safe_psql('postgres', 'create database d2') - node.restore(dump2_file, 'd2') - node.stop() - - if dumped_objects_old != dumped_objects_new: - print("\nDIFF:") - for line in difflib.context_diff(dumped_objects_old.split('\n'), dumped_objects_new.split('\n')): - print(line) - else: - print("\nUPDATE CHECK: ALL GOOD") + old_branch, new_branch = args.branches[0], args.branches[1] + + pathman_objs_script = os.path.join(my_dir, 'dump_pathman_objects.sql') + + data_prefix = "/tmp/pathman_check_update" + if os.path.isdir(data_prefix): + shutil.rmtree(data_prefix) + dump_new_path = os.path.join(data_prefix, 'dump_new.sql') + dump_updated_path = os.path.join(data_prefix, 'dump_updated.sql') + dump_diff_path = os.path.join(data_prefix, 'dump.diff') + pathman_objs_new_path = os.path.join(data_prefix, 'pathman_objects_new.sql') + pathman_objs_updated_path = os.path.join(data_prefix, 'pathman_objects_updated.sql') + pathman_objs_diff_path = os.path.join(data_prefix, 'pathman_objs.diff') + tmp_pathman_path = os.path.join(data_prefix, "pg_pathman") + + shutil.copytree(repo_dir, tmp_pathman_path) + + reinstall_pathman(tmp_pathman_path, new_branch) + with testgres.get_new_node('brand_new') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql('postgres', run_sql) + node.dump(dump_new_path, 'postgres') + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_new_path)) + node.stop() + + # now install old version... + reinstall_pathman(tmp_pathman_path, old_branch) + with testgres.get_new_node('updated') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + + node.start() + # do the same stuff... 
+ node.safe_psql('postgres', run_sql) + # and prepare regression db, see below + node.safe_psql('postgres', 'create database contrib_regression') + node.safe_psql('contrib_regression', 'create extension pg_pathman') + + # and upgrade pathman + node.stop() + reinstall_pathman(tmp_pathman_path, new_branch) + node.start() + print("Running updated db on port {}, datadir {}".format(node.port, node.base_dir)) + node.safe_psql('postgres', "alter extension pg_pathman update") + node.safe_psql('postgres', "set pg_pathman.enable = t;") + + # regression tests db, see below + node.safe_psql('contrib_regression', "alter extension pg_pathman update") + node.safe_psql('contrib_regression', "set pg_pathman.enable = t;") + + node.dump(dump_updated_path, 'postgres') + # time.sleep(432432) + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_updated_path)) + + # check diffs + shell_call("diff -U3 {} {} > {} 2>&1".format(dump_updated_path, dump_new_path, dump_diff_path)) + if os.stat(dump_diff_path).st_size != 0: + msg = "DB dumps are not equal, check out the diff at {}\nProbably that's actually ok, please eyeball the diff manually and say, continue?".format(dump_diff_path) + if input("%s (y/N) " % msg).lower() != 'y': + sys.exit(1) + shell_call("diff -U3 {} {} > {} 2>&1".format(pathman_objs_updated_path, pathman_objs_new_path, pathman_objs_diff_path)) + if os.stat(pathman_objs_diff_path).st_size != 0: + print("pathman objects dumps are not equal, check out the diff at {}".format(pathman_objs_diff_path)) + # sys.exit(1) + + print("just in case, checking that dump can be restored...") + node.safe_psql('postgres', 'create database tmp') + node.restore(dump_updated_path, 'tmp') + + print("finally, run (some) pathman regression tests") + # This is a bit tricky because we want to run tests on exactly this + # installation of extension. It means we must create db beforehand, + # tell pg_regress not create it and discard all create/drop extension + # from tests. + # Not all tests can be thus adapted instantly, so I think that's enough + # for now. 
+ # generated with smth like ls ~/postgres/pg_pathman/sql/ | sort | sed 's/.sql//' | xargs -n 1 printf "'%s',\n" + os.chdir(tmp_pathman_path) + REGRESS = ['pathman_array_qual', + 'pathman_bgw', + 'pathman_callbacks', + 'pathman_column_type', + 'pathman_cte', + 'pathman_domains', + 'pathman_dropped_cols', + 'pathman_expressions', + 'pathman_foreign_keys', + 'pathman_gaps', + 'pathman_inserts', + 'pathman_interval', + 'pathman_lateral', + 'pathman_only', + 'pathman_param_upd_del', + 'pathman_permissions', + 'pathman_rebuild_deletes', + 'pathman_rebuild_updates', + 'pathman_rowmarks', + 'pathman_subpartitions', + 'pathman_update_node', + 'pathman_update_triggers', + 'pathman_utility_stmt', + 'pathman_views' + ] + outfiles = os.listdir(os.path.join(tmp_pathman_path, 'expected')) + for tname in REGRESS: + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' sql/{}.sql".format(tname)) + # CASCADE also removed + shell("sed -i '/DROP EXTENSION pg_pathman/d' sql/{}.sql".format(tname)) + # there might be more then one .out file + for outfile in outfiles: + if outfile.startswith(tname): + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' expected/{}".format(outfile)) + shell("sed -i '/DROP EXTENSION pg_pathman/d' expected/{}".format(outfile)) + + # time.sleep(43243242) + shell("make USE_PGXS=1 PGPORT={} EXTRA_REGRESS_OPTS=--use-existing REGRESS='{}' installcheck 2>&1".format(node.port, " ".join(REGRESS))) + + node.stop() + + print("It's Twelve O'clock and All's Well.") From 85fc5ccf1216a5d26e5d144fd7fc67e092587940 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 25 Feb 2019 13:56:56 +0300 Subject: [PATCH 388/528] Blow out cached bounds of all children when parent is invalidated. A concrete example where leaving them is not ok is - Range partition table - Delete entry from pathman_config (psin was blown, but bounds not) - Now hash partition table; bounds cache with uninitialized hash_idx is used. While here, also spawn relcache inval message on delete from pathman_config, not only from pathman_config_params. --- init.sql | 6 +++++- pg_pathman--1.4--1.5.sql | 4 ++++ src/hooks.c | 2 +- src/include/relation_info.h | 2 +- src/pl_funcs.c | 17 +++++++++++----- src/relation_info.c | 40 ++++++++++++++++++++++++++++++++++++- 6 files changed, 62 insertions(+), 9 deletions(-) diff --git a/init.sql b/init.sql index fdb774db..7dab67d8 100644 --- a/init.sql +++ b/init.sql @@ -111,7 +111,7 @@ ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; /* - * Invalidate relcache every time someone changes parameters config. + * Invalidate relcache every time someone changes parameters config or pathman_config */ CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' @@ -121,6 +121,10 @@ CREATE TRIGGER pathman_config_params_trigger AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + /* * Enable dump of config tables with pg_dump. 
*/ diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index 11406476..2aa02bf9 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -52,6 +52,10 @@ ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_ch parttype, range_interval)); +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + /* * Get parsed and analyzed expression. */ diff --git a/src/hooks.c b/src/hooks.c index 656efe9f..90d84dc4 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -874,7 +874,7 @@ pathman_relcache_hook(Datum arg, Oid relid) else if (relid >= FirstNormalObjectId) { /* Invalidate PartBoundInfo entry if needed */ - forget_bounds_of_partition(relid); + forget_bounds_of_rel(relid); /* Invalidate PartParentInfo entry if needed */ forget_parent_of_partition(relid); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 6b9ffa92..5b23cd3b 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -367,7 +367,7 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartType expected_part_type); /* Bounds cache */ -void forget_bounds_of_partition(Oid partition); +void forget_bounds_of_rel(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); Expr *get_partition_constraint_expr(Oid partition, bool raise_error); void invalidate_bounds_cache(void); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 06b1cf56..7ca2ec0a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -904,14 +904,16 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; Oid pathman_config_params; + Oid pathman_config; Oid partrel; Datum partrel_datum; bool partrel_isnull; /* Fetch Oid of PATHMAN_CONFIG_PARAMS */ pathman_config_params = get_pathman_config_params_relid(true); + pathman_config = get_pathman_config_relid(true); - /* Handle "pg_pathman.enabled = t" case */ + /* Handle "pg_pathman.enabled = f" case */ if (!OidIsValid(pathman_config_params)) goto pathman_config_params_trigger_func_return; @@ -925,12 +927,17 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) trigdata->tg_trigger->tgname); /* Handle wrong relation */ - if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params) - elog(ERROR, "%s: must be fired for relation \"%s\"", + if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params && + RelationGetRelid(trigdata->tg_relation) != pathman_config) + elog(ERROR, "%s: must be fired for relation \"%s\" or \"%s\"", trigdata->tg_trigger->tgname, - get_rel_name(pathman_config_params)); + get_rel_name(pathman_config_params), + get_rel_name(pathman_config)); - /* Extract partitioned relation's Oid */ + /* + * Extract partitioned relation's Oid. 
+ * Hacky: 1 is attrnum of relid for both pathman_config and pathman_config_params + */ partrel_datum = heap_getattr(trigdata->tg_trigtuple, Anum_pathman_config_params_partrel, RelationGetDescr(trigdata->tg_relation), diff --git a/src/relation_info.c b/src/relation_info.c index 9bb8d0db..d524c168 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -160,6 +160,8 @@ static void fill_pbin_with_bounds(PartBoundInfo *pbin, static int cmp_range_entries(const void *p1, const void *p2, void *arg); +static void forget_bounds_of_partition(Oid partition); + static bool query_contains_subqueries(Node *node, void *context); @@ -929,7 +931,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, */ /* Remove partition's constraint from cache */ -void +static void forget_bounds_of_partition(Oid partition) { PartBoundInfo *pbin; @@ -953,6 +955,42 @@ forget_bounds_of_partition(Oid partition) HASH_REMOVE, NULL); } + +} + +/* + * Remove rel's constraint from cache, if relid is partition; + * Remove all children constraints, if it is parent. + */ +void +forget_bounds_of_rel(Oid relid) +{ + PartStatusInfo *psin; + + forget_bounds_of_partition(relid); + + /* + * If it was the parent who got invalidated, purge children's bounds. + * We assume here that if bounds_cache has something, parent must be also + * in status_cache. Fragile, but seems better then blowing out full bounds + * cache or digging pathman_config on each relcache invalidation. + */ + + /* Find status cache entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin != NULL && psin->prel != NULL) + { + uint32 i; + PartRelationInfo *prel = psin->prel; + Oid *children = PrelGetChildrenArray(prel); + + for (i = 0; i < PrelChildrenCount(prel); i++) + { + forget_bounds_of_partition(children[i]); + } + } } /* Return partition's constraint as expression tree */ From dd7483b78fe07498dc2709380a2affc50db04543 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 25 Feb 2019 15:00:18 +0300 Subject: [PATCH 389/528] Bump lib version 1.5.5. --- META.json | 2 +- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/META.json b/META.json index 9321b82c..fa06948c 100644 --- a/META.json +++ b/META.json @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.4", + "version": "1.5.5", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 44f14d96..a9305a9e 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.4 + 1.5.5 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 5b133d01..63586f6b 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.4" +#define CURRENT_LIB_VERSION "1.5.5" void *pathman_cache_search_relid(HTAB *cache_table, From 0299398bdbf2ebd35cf6b957c50d73dab5132561 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 28 Feb 2019 18:46:48 +0300 Subject: [PATCH 390/528] Wait 100 seconds, not 10 in concurrent partitioning test. Looks like RaspberryPi doesn't like boundary cache blowing added in 85fc5ccf121. 
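For reference, the loop being tuned is a plain bounded poll; a sketch of its
shape follows (the table name and the sleep interval are assumptions — only
the i > 500 bound and the warning text come from the test itself):

    DO $$
    DECLARE
        i INT := 0;
    BEGIN
        LOOP
            i := i + 1;
            -- stand-in for the test's real completion check
            EXIT WHEN EXISTS (SELECT 1 FROM pg_inherits
                              WHERE inhparent = 'test_bgw'::REGCLASS);
            IF i > 500 THEN     -- was 50: ~100 seconds instead of ~10
                RAISE WARNING 'looks like partitioning bgw is stuck!';
                EXIT;
            END IF;
            PERFORM pg_sleep(0.2);  -- assumed polling interval
        END LOOP;
    END $$;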
--- expected/pathman_bgw.out | 2 +- sql/pathman_bgw.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 4166ef4e..5d5d2b21 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -212,7 +212,7 @@ BEGIN EXIT; -- exit loop END IF; - IF i > 50 THEN + IF i > 500 THEN RAISE WARNING 'looks like partitioning bgw is stuck!'; EXIT; -- exit loop END IF; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index e05a829d..28f922e6 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -126,7 +126,7 @@ BEGIN EXIT; -- exit loop END IF; - IF i > 50 THEN + IF i > 500 THEN RAISE WARNING 'looks like partitioning bgw is stuck!'; EXIT; -- exit loop END IF; From debe43dba40c80a869521a0f28756bce57e31146 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 4 Mar 2019 18:02:58 +0300 Subject: [PATCH 391/528] Forbid to partition tables with children. Since pathman doesn't check for children existence anyway and duplicates them. This doesn't explain 'attempted to update invisible tuple' in PGPRO-2507 though, but let's leave this for another time. --- init.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/init.sql b/init.sql index 7dab67d8..16ec0b8f 100644 --- a/init.sql +++ b/init.sql @@ -455,6 +455,10 @@ BEGIN RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; + IF EXISTS (SELECT 1 FROM pg_inherits WHERE inhparent = parent_relid) THEN + RAISE EXCEPTION 'can''t partition table "%" with existing children', parent_relid; + END IF; + /* Check if there are foreign keys that reference the relation */ FOR constr_name IN (SELECT conname FROM pg_catalog.pg_constraint WHERE confrelid = parent_relid::REGCLASS::OID) From dbf8262dca5edfa375bafa6b7c36073fb0bfd6cc Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 11 Mar 2019 21:01:43 +0300 Subject: [PATCH 392/528] A couple of sanity checks. One of them is pretty ugly -- we are checking out pathman_config each time during create_single_range_partition_internal. Also invalidate prel cache after manual add_to_pathman_config, just in case. --- src/partition_creation.c | 17 +++++++++++++++++ src/pl_funcs.c | 7 +++++++ 2 files changed, 24 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index fc950c4f..b41b2541 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -112,6 +112,23 @@ create_single_range_partition_internal(Oid parent_relid, init_callback_params callback_params; List *trigger_columns = NIL; Node *expr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + + /* + * Sanity check. Probably needed only if some absurd init_callback + * decides to drop the table while we are creating partitions. + * It seems much better to use prel cache here, but this doesn't work + * because it regards tables with no partitions as not partitioned at all + * (build_pathman_relation_info returns NULL), and if I comment out that, + * tests fail for not immediately obvious reasons. Don't want to dig + * into this now. 
+ */ + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL)) + { + elog(ERROR, "Can't create range partition: relid %u doesn't exist or not partitioned", parent_relid); + } /* Generate a name if asked to */ if (!partition_rv) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 7ca2ec0a..2f96b53f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -252,6 +252,11 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) funccxt = SRF_FIRSTCALL_INIT(); + if (!TopPathmanContext) + { + elog(ERROR, "pg_pathman's memory contexts are not initialized yet"); + } + old_mcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); usercxt = (show_cache_stats_cxt *) palloc(sizeof(show_cache_stats_cxt)); @@ -893,6 +898,8 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } } + CacheInvalidateRelcacheByRelid(relid); + PG_RETURN_BOOL(true); } From 50c8348639a299e6d2132e0e166df5b6a88f0acc Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 1 Apr 2019 17:59:37 +0300 Subject: [PATCH 393/528] PG_TRY without PG_RE_THROW is no-no: remove it from is_tuple_convertible. Noticed by errordata_stack_depth overflow: log wasn't flushed. --- hash.sql | 6 ++++-- range.sql | 6 ++++-- src/pl_funcs.c | 30 ++++++++++++------------------ 3 files changed, 20 insertions(+), 22 deletions(-) diff --git a/hash.sql b/hash.sql index 0f694882..45c9b71d 100644 --- a/hash.sql +++ b/hash.sql @@ -94,9 +94,11 @@ BEGIN END IF; /* Check that new partition has an equal structure as parent does */ - IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, new_partition); + EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; - END IF; + END; /* Check that table is partitioned */ IF @extschema@.get_partition_key(parent_relid) IS NULL THEN diff --git a/range.sql b/range.sql index a014ed0f..5aeaad58 100644 --- a/range.sql +++ b/range.sql @@ -639,9 +639,11 @@ BEGIN /* Check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, partition_relid); + EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; - END IF; + END; part_expr := @extschema@.get_partition_key(parent_relid); part_type := @extschema@.get_partition_type(parent_relid); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 2f96b53f..99f53bd5 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -661,38 +661,32 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } +/* + * Bail out with ERROR if rel1 tuple can't be converted to rel2 tuple. 
+ */ Datum is_tuple_convertible(PG_FUNCTION_ARGS) { Relation rel1, rel2; - bool res = true; + void *map; /* we don't actually need it */ rel1 = heap_open(PG_GETARG_OID(0), AccessShareLock); rel2 = heap_open(PG_GETARG_OID(1), AccessShareLock); - PG_TRY(); - { - void *map; /* we don't actually need it */ - - /* Try to build a conversion map */ - map = convert_tuples_by_name_map(RelationGetDescr(rel1), - RelationGetDescr(rel2), - ERR_PART_DESC_CONVERT); + /* Try to build a conversion map */ + map = convert_tuples_by_name_map(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); - /* Now free map */ - pfree(map); - } - PG_CATCH(); - { - res = false; - } - PG_END_TRY(); + /* Now free map */ + pfree(map); heap_close(rel1, AccessShareLock); heap_close(rel2, AccessShareLock); - PG_RETURN_BOOL(res); + /* still return true to avoid changing tests */ + PG_RETURN_BOOL(true); } From 5f6de2310a886ae39aa26057a89b94fce0108bc7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 11:46:22 +0300 Subject: [PATCH 394/528] Protect from ATX. --- src/hooks.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 90d84dc4..462d4c8c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -27,6 +27,7 @@ #include "xact_handling.h" #include "access/transam.h" +#include "access/xact.h" #include "catalog/pg_authid.h" #include "miscadmin.h" #include "optimizer/cost.h" @@ -770,6 +771,11 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) if (!IsPathmanReady()) return; +#if defined(PGPRO_EE) + if (getNestLevelATX() != 0) + elog(FATAL, "pg_pathman extension is not compatible with autonomous transactions and connection pooling"); +#endif /* PGPRO_EE */ + /* Process inlined SQL functions (we've already entered planning stage) */ if (IsPathmanReady() && get_planner_calls_count() > 0) { From 9f51be4fc9a5637a478b8fc71779b2bded1efcc3 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 15:48:14 +0300 Subject: [PATCH 395/528] Purge prel_resowner hashtable in fini_local_cache. Wobbling with it in 'resonwner_prel_callback' after all cache was purged is an error. 
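The fix is the usual guarded-destroy idiom for a backend-local dynahash; a
minimal sketch (the helper name is illustrative — the real change lives in
fini_local_cache(), see the hunk below):

    #include "postgres.h"
    #include "utils/hsearch.h"

    static HTAB *prel_resowner = NULL;

    /* illustrative teardown helper */
    static void
    purge_prel_resowner_map(void)
    {
        if (prel_resowner != NULL)
        {
            hash_destroy(prel_resowner);
            prel_resowner = NULL;   /* later resowner callbacks must see it as gone */
        }
    }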
--- src/include/relation_info.h | 3 ++- src/init.c | 6 ++++++ src/relation_info.c | 3 ++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 5b23cd3b..80b92740 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -401,6 +401,7 @@ void init_relation_info_static_data(void); /* For pg_pathman.enable_bounds_cache GUC */ extern bool pg_pathman_enable_bounds_cache; +extern HTAB *prel_resowner; /* This allows us to track leakers of PartRelationInfo */ #ifdef USE_RELINFO_LEAK_TRACKER @@ -419,7 +420,7 @@ extern int prel_resowner_line; close_pathman_relation_info(prel); \ prel = NULL; \ } while (0) -#endif +#endif /* USE_RELINFO_LEAK_TRACKER */ #endif /* RELATION_INFO_H */ diff --git a/src/init.c b/src/init.c index f6ddbdae..c80f118f 100644 --- a/src/init.c +++ b/src/init.c @@ -389,6 +389,12 @@ fini_local_cache(void) status_cache = NULL; bounds_cache = NULL; + if (prel_resowner != NULL) + { + hash_destroy(prel_resowner); + prel_resowner = NULL; + } + /* Now we can clear allocations */ MemoryContextReset(PathmanParentsCacheContext); MemoryContextReset(PathmanStatusCacheContext); diff --git a/src/relation_info.c b/src/relation_info.c index d524c168..30853165 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -129,8 +129,9 @@ static bool delayed_shutdown = false; /* pathman was dropped */ /* * PartRelationInfo is controlled by ResourceOwner; + * resowner -> List of controlled PartRelationInfos by this ResourceOwner */ -static HTAB *prel_resowner = NULL; +HTAB *prel_resowner = NULL; /* Handy wrappers for Oids */ From dddaa24ce4cc3dcad11dec02f1f423c00f69193b Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 15:55:36 +0300 Subject: [PATCH 396/528] Relax FATAL to ERROR in atx check. --- src/hooks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hooks.c b/src/hooks.c index 462d4c8c..8db5b1c5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -773,7 +773,7 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) #if defined(PGPRO_EE) if (getNestLevelATX() != 0) - elog(FATAL, "pg_pathman extension is not compatible with autonomous transactions and connection pooling"); + elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); #endif /* PGPRO_EE */ /* Process inlined SQL functions (we've already entered planning stage) */ From cbdde20507397f54e54ee7eb815b0f98c4b54133 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 19:00:30 +0300 Subject: [PATCH 397/528] One more atx check inside executor. 
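For context (a hedged sketch; the statement and table names are made up):
parse analysis happens once, at PREPARE time, so an EXECUTE issued from
inside an autonomous transaction never passes through the parse_analyze
hook and can only be refused at executor entry:

    PREPARE ins_row(INT) AS INSERT INTO part_tbl VALUES ($1);
    -- ...enter an autonomous transaction (PGPRO EE)...
    EXECUTE ins_row(1);  -- no parse analysis here; caught in partition_filter_exec()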
--- src/partition_filter.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/partition_filter.c b/src/partition_filter.c index 098a72a5..64685e0f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -17,6 +17,7 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/xact.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" #include "foreign/fdwapi.h" @@ -733,6 +734,12 @@ partition_filter_exec(CustomScanState *node) PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; + /* If statement is prepared, parse_analyze hook won't catch this */ +#if defined(PGPRO_EE) + if (getNestLevelATX() != 0) + elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); +#endif /* PGPRO_EE */ + slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) From 7aa7d1c28b5d51a5c5f2278dd4e2a5ec92823ae7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 7 Apr 2019 08:18:08 +0300 Subject: [PATCH 398/528] Allow atx back again. --- src/hooks.c | 5 ----- src/include/partition_filter.h | 1 + src/partition_filter.c | 6 ------ 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 8db5b1c5..8409d4cf 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -771,11 +771,6 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) if (!IsPathmanReady()) return; -#if defined(PGPRO_EE) - if (getNestLevelATX() != 0) - elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); -#endif /* PGPRO_EE */ - /* Process inlined SQL functions (we've already entered planning stage) */ if (IsPathmanReady() && get_planner_calls_count() > 0) { diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index b3ecffeb..bf03433c 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -76,6 +76,7 @@ struct ResultPartsStorage EState *estate; /* pointer to executor's state */ CmdType command_type; /* INSERT | UPDATE */ + /* partition relid -> ResultRelInfoHolder */ HTAB *result_rels_table; HASHCTL result_rels_table_config; diff --git a/src/partition_filter.c b/src/partition_filter.c index 64685e0f..f905470e 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -734,12 +734,6 @@ partition_filter_exec(CustomScanState *node) PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; - /* If statement is prepared, parse_analyze hook won't catch this */ -#if defined(PGPRO_EE) - if (getNestLevelATX() != 0) - elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); -#endif /* PGPRO_EE */ - slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) From e251d2a4086befc76ec6c43f10b8c4a02a6c0e15 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 7 Apr 2019 11:27:36 +0300 Subject: [PATCH 399/528] Forbid 0 oid as partition_relid in build_range_condition. 
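In SQL terms (illustration only; the call assumes the 1.5 signature of
build_range_condition, and 0::REGCLASS smuggles in InvalidOid):

    SELECT build_range_condition(0::REGCLASS, 'val', 1, 10);
    ERROR:  'partition_relid' must be normal object oid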
--- src/pl_range_funcs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 351926f7..0d3ca9d7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -15,6 +15,7 @@ #include "utils.h" #include "xact_handling.h" +#include "access/transam.h" #include "access/xact.h" #include "catalog/heap.h" #include "catalog/namespace.h" @@ -1072,6 +1073,9 @@ build_range_condition(PG_FUNCTION_ARGS) } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'partition_relid' should not be NULL"))); + if (partition_relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' must be normal object oid"))); if (!PG_ARGISNULL(1)) { From 079797e0d5efe171c3ae10733d3c2f4977e37689 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 11 Apr 2019 12:57:24 +0300 Subject: [PATCH 400/528] Purge potentially created bounds cache entries if build_pathman_relation_info failed. Otherwise they might contain obsolete data, e.g. - create range partitioned table T with a couple of partitions - make pathman forget about it - create another table P inherited from previously partitioned one, but with no pathman constraints - attempt to add_to_pathman_config T as range partitioned table it will fail as P has no constraint, but might register other partitions in bounds cache - now add_to_pathman_config T as hash partitioned table will fail in attempt to use this cache (part_idx not initialized) --- src/relation_info.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index 30853165..848fd521 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -408,7 +408,7 @@ build_pathman_relation_info(Oid relid, Datum *values) prel->fresh = true; prel->mcxt = prel_mcxt; - /* Memory leak protection */ + /* Memory leak and cache protection */ PG_TRY(); { MemoryContext old_mcxt; @@ -496,6 +496,32 @@ build_pathman_relation_info(Oid relid, Datum *values) } PG_CATCH(); { + /* + * If we managed to create some children but failed later, bounds + * cache now might have obsolete data for something that probably is + * not a partitioned table at all. Remove it. + */ + if (prel->children != NULL) + { + uint32 i; + + for (i = 0; i < PrelChildrenCount(prel); i++) + { + Oid child; + + /* + * We rely on children and ranges array allocated with 0s, not + * random data + */ + if (prel->parttype == PT_HASH) + child = prel->children[i]; + else if (prel->parttype == PT_RANGE) + child = prel->ranges[i].child_oid; + + forget_bounds_of_partition(child); + } + } + /* Free this entry */ free_pathman_relation_info(prel); From 2ec39238ce76e7cdc3f6948ff6c2fae366cace05 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 18 Apr 2019 13:36:14 +0300 Subject: [PATCH 401/528] Check that pathman is initialized in get_pathman_config_relid. Also, check hash partitioned table consistency always, not under assert checking. 
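The subtlety in get_pathman_config_relid(): the readiness test used to sit
inside the invalid-Oid branch, so an uninitialized pathman that still held a
stale yet valid cached relid would hand it out silently. Checking readiness
first closes that window; in outline:

    if (!IsPathmanInitialized())
    {
        if (invalid_is_ok)
            return InvalidOid;
        elog(ERROR, "pg_pathman is not initialized yet");
    }
    /* only now is the cached pathman_config_relid trustworthy */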
--- src/pg_pathman.c | 28 ++++++++++++++++++---------- src/relation_info.c | 2 -- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1b65a832..daa1afdc 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -333,13 +333,17 @@ _PG_init(void) Oid get_pathman_config_relid(bool invalid_is_ok) { + if (!IsPathmanInitialized()) + { + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); + } + /* Raise ERROR if Oid is invalid */ if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? - "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_relid))); + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_relid)); return pathman_config_relid; } @@ -348,13 +352,17 @@ get_pathman_config_relid(bool invalid_is_ok) Oid get_pathman_config_params_relid(bool invalid_is_ok) { + if (!IsPathmanInitialized()) + { + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); + } + /* Raise ERROR if Oid is invalid */ if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? - "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_params_relid))); + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_params_relid)); return pathman_config_params_relid; } diff --git a/src/relation_info.c b/src/relation_info.c index 848fd521..0acb2e6d 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -802,7 +802,6 @@ fill_prel_with_partitions(PartRelationInfo *prel, prel->children[i] = prel->ranges[i].child_oid; } -#ifdef USE_ASSERT_CHECKING /* Check that each partition Oid has been assigned properly */ if (prel->parttype == PT_HASH) for (i = 0; i < PrelChildrenCount(prel); i++) @@ -815,7 +814,6 @@ fill_prel_with_partitions(PartRelationInfo *prel, get_rel_name_or_relid(PrelParentRelid(prel))); } } -#endif } /* qsort() comparison function for RangeEntries */ From de0197de0de7c5c9a16382e0bd6399baa9a5de04 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 18 Apr 2019 18:12:17 +0300 Subject: [PATCH 402/528] Warn that pathman is disabled in case of broken hash partitioned table. --- src/relation_info.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index 0acb2e6d..5eb9fb40 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -809,9 +809,10 @@ fill_prel_with_partitions(PartRelationInfo *prel, if (!OidIsValid(prel->children[i])) { DisablePathman(); /* disable pg_pathman since config is broken */ - elog(ERROR, "pg_pathman's cache for relation \"%s\" " - "has not been properly initialized", - get_rel_name_or_relid(PrelParentRelid(prel))); + ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " + "has not been properly initialized", + get_rel_name_or_relid(PrelParentRelid(prel))), + errhint(INIT_ERROR_HINT))); } } } From e4025126474db48dd72f901f208fe1df6d548d53 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 19 Apr 2019 13:16:09 +0300 Subject: [PATCH 403/528] Typos in attempting to find parent in partcache during partstatus invalildation. 
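The reordering below is not cosmetic: forget_status_of_relation() reaches the
parent's status entry through the parents cache (ppar), so it apparently must
run while that entry still exists; only then may forget_parent_of_partition()
sever the link. The resulting sequence in pathman_relcache_hook():

    forget_bounds_of_rel(relid);        /* bounds first */
    forget_status_of_relation(relid);   /* needs parents cache to find the parent */
    forget_parent_of_partition(relid);  /* only then drop the parent link */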
---
 src/hooks.c         | 6 +++---
 src/relation_info.c | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/hooks.c b/src/hooks.c
index 8409d4cf..bf9d0525 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -877,11 +877,11 @@ pathman_relcache_hook(Datum arg, Oid relid)
 		/* Invalidate PartBoundInfo entry if needed */
 		forget_bounds_of_rel(relid);
 
-		/* Invalidate PartParentInfo entry if needed */
-		forget_parent_of_partition(relid);
-
 		/* Invalidate PartStatusInfo entry if needed */
 		forget_status_of_relation(relid);
+
+		/* Invalidate PartParentInfo entry if needed */
+		forget_parent_of_partition(relid);
 	}
 }
 
diff --git a/src/relation_info.c b/src/relation_info.c
index 5eb9fb40..988873d6 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -215,7 +215,7 @@ forget_status_of_relation(Oid relid)
 	{
 		/* Find status cache entry for parent */
 		psin = pathman_cache_search_relid(status_cache,
-										  relid, HASH_FIND,
+										  ppar->parent_relid, HASH_FIND,
 										  NULL);
 		if (psin)
 			invalidate_psin_entry(psin);
 	}

From e2d29d2695cfb72f31c98e250f0ea0c5584ae9a4 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Fri, 19 Apr 2019 13:32:45 +0300
Subject: [PATCH 404/528] Check for children array size sanity in fill_prel_with_partitions.

... which might be violated if a hash partition was dropped.
---
 src/relation_info.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/src/relation_info.c b/src/relation_info.c
index 988873d6..bb86cc6a 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -755,6 +755,18 @@ fill_prel_with_partitions(PartRelationInfo *prel,
 	switch (prel->parttype)
 	{
 		case PT_HASH:
+			/*
+			 * This might be the case if hash part was dropped, and thus
+			 * children array alloc'ed smaller than needed, but parts
+			 * bound cache still keeps entries with high indexes.
+			 */
+			if (pbin->part_idx >= PrelChildrenCount(prel))
+				ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" "
+									   "has not been properly initialized. "
+									   "Looks like one of hash partitions was dropped.",
+									   get_rel_name_or_relid(PrelParentRelid(prel))),
+								errhint(INIT_ERROR_HINT)));
+
 			prel->children[pbin->part_idx] = pbin->child_relid;
 			break;

From a53a60726c870f8cd556ef3de7238f6b7c9425f4 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Fri, 19 Apr 2019 13:44:59 +0300
Subject: [PATCH 405/528] Fix previous commit: actually disable pathman in case of trouble.

---
 src/relation_info.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/relation_info.c b/src/relation_info.c
index bb86cc6a..9e20e93d 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -761,11 +761,14 @@ fill_prel_with_partitions(PartRelationInfo *prel,
 			 * bound cache still keeps entries with high indexes.
 			 */
 			if (pbin->part_idx >= PrelChildrenCount(prel))
+			{
+				DisablePathman(); /* disable pg_pathman since config is broken */
 				ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" "
 									   "has not been properly initialized. "
 									   "Looks like one of hash partitions was dropped.",
 									   get_rel_name_or_relid(PrelParentRelid(prel))),
 								errhint(INIT_ERROR_HINT)));
+			}
 
 			prel->children[pbin->part_idx] = pbin->child_relid;
 			break;

From 3556ae3165917a5e05410c0866c9dcca95d9cdf0 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Fri, 19 Apr 2019 17:49:49 +0300
Subject: [PATCH 406/528] A couple of tests for recent commits.
--- expected/pathman_basic.out | 58 ++++++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 30 ++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 0ae1ae6a..289b239e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1804,6 +1804,64 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +-- is pathman (caches, in particular) strong enough to carry out this? +-- 079797e0d5 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('test.part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT drop_partitions('test.part_test'); +ERROR: table "test.part_test" has no partitions +SELECT disable_pathman_for('test.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('test.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('test.part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE test.part_test CASCADE; +NOTICE: drop cascades to 5 other objects +-- +-- 85fc5ccf121 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('test.part_test'); + append_range_partition +------------------------ + test.part_test_301 +(1 row) + +DELETE FROM test.part_test; +SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "test.part_test" with existing children +DROP TABLE test.part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 5a0c471d..a9c6f1a3 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -546,6 +546,36 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; +-- is pathman (caches, in particular) strong enough to carry out this? 
+ +-- 079797e0d5 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); +SELECT set_interval('test.part_test', 100); +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT drop_partitions('test.part_test'); +SELECT disable_pathman_for('test.part_test'); + +CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); +SELECT add_to_pathman_config('test.part_test', 'val', '10'); +SELECT add_to_pathman_config('test.part_test', 'val'); + +DROP TABLE test.part_test CASCADE; +-- + +-- 85fc5ccf121 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); +SELECT append_range_partition('test.part_test'); +DELETE FROM test.part_test; +SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ + +DROP TABLE test.part_test CASCADE; +-- DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; From 9c2ab5fa0c20148937dc663961245b533a128ba3 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 22 Apr 2019 19:18:26 +0300 Subject: [PATCH 407/528] Unify unload_config and DisablePathman and do it (purge caches) on enable = false. Probably there used to be an idea that with 'set pg_pathman = false' you can just temporary disable planning hooks etc without resetting pathman caches, but that never worked properly. At least, pathman_relcache_hook refuses to inval cache if it is disabled. Also, if extension is disabled we never get to unload_config, which means you could - disable pathman (guc disabled, but caches not destroyed) - drop extension (still caches are here) - create it back again, now cached relids are wrong With some care we could totally separate them by maintaining caches even when pathman is disabled, but is it worth it? --- Makefile | 1 + expected/pathman_basic.out | 58 ------------------------ expected/pathman_cache_pranks.out | 75 +++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 31 ------------- sql/pathman_cache_pranks.sql | 49 ++++++++++++++++++++ src/hooks.c | 20 ++++++++- src/include/init.h | 2 +- src/init.c | 9 +++- src/pg_pathman.c | 13 ++---- src/pl_funcs.c | 2 +- src/relation_info.c | 7 +++ 11 files changed, 164 insertions(+), 103 deletions(-) create mode 100644 expected/pathman_cache_pranks.out create mode 100644 sql/pathman_cache_pranks.sql diff --git a/Makefile b/Makefile index 80f74e7f..c1281871 100644 --- a/Makefile +++ b/Makefile @@ -33,6 +33,7 @@ PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" REGRESS = pathman_array_qual \ pathman_basic \ pathman_bgw \ + pathman_cache_pranks \ pathman_calamity \ pathman_callbacks \ pathman_column_type \ diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 289b239e..0ae1ae6a 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1804,64 +1804,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects --- is pathman (caches, in particular) strong enough to carry out this? 
--- 079797e0d5 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 30); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); - create_range_partitions -------------------------- - 3 -(1 row) - -SELECT set_interval('test.part_test', 100); - set_interval --------------- - -(1 row) - -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT drop_partitions('test.part_test'); -ERROR: table "test.part_test" has no partitions -SELECT disable_pathman_for('test.part_test'); - disable_pathman_for ---------------------- - -(1 row) - -CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); -NOTICE: merging column "val" with inherited definition -SELECT add_to_pathman_config('test.part_test', 'val', '10'); -ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist -SELECT add_to_pathman_config('test.part_test', 'val'); -ERROR: wrong constraint format for HASH partition "part_test_1" -DROP TABLE test.part_test CASCADE; -NOTICE: drop cascades to 5 other objects --- --- 85fc5ccf121 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 3000); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); - create_range_partitions -------------------------- - 300 -(1 row) - -SELECT append_range_partition('test.part_test'); - append_range_partition ------------------------- - test.part_test_301 -(1 row) - -DELETE FROM test.part_test; -SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ -ERROR: cannot create partition with range (-inf, +inf) -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ -ERROR: can't partition table "test.part_test" with existing children -DROP TABLE test.part_test CASCADE; -NOTICE: drop cascades to 302 other objects --- DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out new file mode 100644 index 00000000..3ed9570f --- /dev/null +++ b/expected/pathman_cache_pranks.out @@ -0,0 +1,75 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? 
+SET search_path = 'public'; +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table "part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 5 other objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- finalize +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a9c6f1a3..6d2e52e1 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -546,37 +546,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; --- is pathman (caches, in particular) strong enough to carry out this? 
- --- 079797e0d5 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 30); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); -SELECT set_interval('test.part_test', 100); -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT drop_partitions('test.part_test'); -SELECT disable_pathman_for('test.part_test'); - -CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); -SELECT add_to_pathman_config('test.part_test', 'val', '10'); -SELECT add_to_pathman_config('test.part_test', 'val'); - -DROP TABLE test.part_test CASCADE; --- - --- 85fc5ccf121 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 3000); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); -SELECT append_range_partition('test.part_test'); -DELETE FROM test.part_test; -SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ - -DROP TABLE test.part_test CASCADE; --- - DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql new file mode 100644 index 00000000..f21f0594 --- /dev/null +++ b/sql/pathman_cache_pranks.sql @@ -0,0 +1,49 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? + +SET search_path = 'public'; + +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; + +-- create it for further tests +CREATE EXTENSION pg_pathman; + +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT set_interval('part_test', 100); +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +SELECT disable_pathman_for('part_test'); + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +SELECT add_to_pathman_config('part_test', 'val', '10'); +SELECT add_to_pathman_config('part_test', 'val'); + +DROP TABLE part_test CASCADE; +-- + +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT append_range_partition('part_test'); +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ + +DROP TABLE part_test CASCADE; +-- + +-- finalize +DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index bf9d0525..2a0543bc 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -615,6 +615,12 @@ pathman_enable_assign_hook(bool newval, void *extra) "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " "and some other options have been %s", newval ? 
"enabled" : "disabled"); + + /* Purge caches if pathman was disabled */ + if (!newval) + { + unload_config(); + } } static void @@ -850,6 +856,8 @@ pathman_shmem_startup_hook(void) void pathman_relcache_hook(Datum arg, Oid relid) { + Oid pathman_config_relid; + /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; @@ -863,10 +871,18 @@ pathman_relcache_hook(Datum arg, Oid relid) invalidate_bounds_cache(); invalidate_parents_cache(); invalidate_status_cache(); + delay_pathman_shutdown(); /* see below */ } - /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ - if (relid == get_pathman_config_relid(false)) + /* + * Invalidation event for PATHMAN_CONFIG table (probably DROP EXTENSION). + * Digging catalogs here is expensive and probably illegal, so we take + * cached relid. It is possible that we don't know it atm (e.g. pathman + * was disabled). However, in this case caches must have been cleaned + * on disable, and there is no DROP-specific additional actions. + */ + pathman_config_relid = get_pathman_config_relid(true); + if (relid == pathman_config_relid) { delay_pathman_shutdown(); } diff --git a/src/include/init.h b/src/include/init.h index 63586f6b..25432840 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -139,7 +139,7 @@ simplify_mcxt_name(MemoryContext mcxt) pathman_init_state.pg_pathman_enable = false; \ pathman_init_state.auto_partition = false; \ pathman_init_state.override_copy = false; \ - pathman_init_state.initialization_needed = true; \ + unload_config(); \ } while (0) diff --git a/src/init.c b/src/init.c index c80f118f..f1ed689c 100644 --- a/src/init.c +++ b/src/init.c @@ -134,7 +134,14 @@ save_pathman_init_state(PathmanInitState *temp_init_state) void restore_pathman_init_state(const PathmanInitState *temp_init_state) { - pathman_init_state = *temp_init_state; + /* + * initialization_needed is not restored: it is not just a setting but + * internal thing, caches must be inited when it is set. Better would be + * to separate it from this struct entirely. + */ + pathman_init_state.pg_pathman_enable = temp_init_state->pg_pathman_enable; + pathman_init_state.auto_partition = temp_init_state->auto_partition; + pathman_init_state.override_copy = temp_init_state->override_copy; } /* diff --git a/src/pg_pathman.c b/src/pg_pathman.c index daa1afdc..3511a243 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -284,8 +284,6 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) void _PG_init(void) { - PathmanInitState temp_init_state; - if (!process_shared_preload_libraries_in_progress) { elog(ERROR, "pg_pathman module must be initialized by Postmaster. " @@ -297,13 +295,10 @@ _PG_init(void) RequestAddinShmemSpace(estimate_pathman_shmem_size()); /* Assign pg_pathman's initial state */ - temp_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; - temp_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; - temp_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; - temp_init_state.initialization_needed = true; /* ofc it's needed! */ - - /* Apply initial state */ - restore_pathman_init_state(&temp_init_state); + pathman_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; + pathman_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; + pathman_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; + pathman_init_state.initialization_needed = true; /* ofc it's needed! 
*/ /* Set basic hooks */ pathman_set_rel_pathlist_hook_next = set_rel_pathlist_hook; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 99f53bd5..c302089e 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -859,7 +859,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } PG_CATCH(); { - /* We have to restore all changed flags */ + /* We have to restore changed flags */ restore_pathman_init_state(&init_state); /* Rethrow ERROR */ diff --git a/src/relation_info.c b/src/relation_info.c index 9e20e93d..d2d95351 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -501,6 +501,13 @@ build_pathman_relation_info(Oid relid, Datum *values) * cache now might have obsolete data for something that probably is * not a partitioned table at all. Remove it. */ + if (!IsPathmanInitialized()) + /* + * ... unless failure was so hard that caches were already destoyed, + * i.e. extension disabled + */ + PG_RE_THROW(); + if (prel->children != NULL) { uint32 i; From 09914f44f11548394966986d81d5fe8755382d25 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 07:22:07 +0300 Subject: [PATCH 408/528] Reset internal ctxs in fini_local_cache only when they actually exist. --- expected/pathman_cache_pranks.out | 5 +++++ sql/pathman_cache_pranks.sql | 4 ++++ src/init.c | 9 ++++++--- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out index 3ed9570f..581063bc 100644 --- a/expected/pathman_cache_pranks.out +++ b/expected/pathman_cache_pranks.out @@ -14,6 +14,11 @@ CREATE EXTENSION pg_pathman; DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; +-- make sure nothing breaks on disable/enable +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql index f21f0594..3f3dd714 100644 --- a/sql/pathman_cache_pranks.sql +++ b/sql/pathman_cache_pranks.sql @@ -16,6 +16,10 @@ DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; +-- make sure nothing breaks on disable/enable +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; + -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); diff --git a/src/init.c b/src/init.c index f1ed689c..92d2d213 100644 --- a/src/init.c +++ b/src/init.c @@ -403,9 +403,12 @@ fini_local_cache(void) } /* Now we can clear allocations */ - MemoryContextReset(PathmanParentsCacheContext); - MemoryContextReset(PathmanStatusCacheContext); - MemoryContextReset(PathmanBoundsCacheContext); + if (TopPathmanContext) + { + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); + } } From 2dd78a56cc0d20aff33e37346670d0289c2ea345 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 07:37:28 +0300 Subject: [PATCH 409/528] Make test from previous commit more useful. 
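Namely, exercise the assign hook before the extension has ever been
initialized in the session, which is a distinct code path (this is exactly
what the test below now does):

    SET pg_pathman.enable = false;
    SET pg_pathman.enable = true;
    -- only afterwards: CREATE EXTENSION pg_pathman;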
--- expected/pathman_cache_pranks.out | 10 +++++----- sql/pathman_cache_pranks.sql | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out index 581063bc..5493ae96 100644 --- a/expected/pathman_cache_pranks.out +++ b/expected/pathman_cache_pranks.out @@ -1,6 +1,11 @@ \set VERBOSITY terse -- is pathman (caches, in particular) strong enough to carry out this? SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled -- wobble with create-drop ext: tests cached relids sanity CREATE EXTENSION pg_pathman; SET pg_pathman.enable = f; @@ -14,11 +19,6 @@ CREATE EXTENSION pg_pathman; DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; --- make sure nothing breaks on disable/enable -SET pg_pathman.enable = false; -NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled -SET pg_pathman.enable = true; -NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql index 3f3dd714..782ef7f0 100644 --- a/sql/pathman_cache_pranks.sql +++ b/sql/pathman_cache_pranks.sql @@ -3,6 +3,10 @@ SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; + -- wobble with create-drop ext: tests cached relids sanity CREATE EXTENSION pg_pathman; SET pg_pathman.enable = f; @@ -16,10 +20,6 @@ DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; --- make sure nothing breaks on disable/enable -SET pg_pathman.enable = false; -SET pg_pathman.enable = true; - -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); From 32fd6116fe9ee895981156a211d44a139d152159 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 12:26:30 +0300 Subject: [PATCH 410/528] Remove wrong fastpath in pathman_enable_assign_hook. If one of options is disabled, but pathman generally enabled and disable SET comes, we actually need to take some actions. --- src/hooks.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2a0543bc..30be29e7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -594,16 +594,6 @@ pathman_enable_assign_hook(bool newval, void *extra) elog(DEBUG2, "pg_pathman_enable_assign_hook() [newval = %s] triggered", newval ? 
"true" : "false"); - /* Return quickly if nothing has changed */ - if (newval == (pathman_init_state.pg_pathman_enable && - pathman_init_state.auto_partition && - pathman_init_state.override_copy && - pg_pathman_enable_runtimeappend && - pg_pathman_enable_runtime_merge_append && - pg_pathman_enable_partition_filter && - pg_pathman_enable_bounds_cache)) - return; - pathman_init_state.auto_partition = newval; pathman_init_state.override_copy = newval; pg_pathman_enable_runtimeappend = newval; From 38bf80ef86676f5bc0ecade1ead49074c4f0f0ff Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 13:18:24 +0300 Subject: [PATCH 411/528] Previous commit fix: but still notify about change. --- src/hooks.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 30be29e7..854d422b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -594,6 +594,21 @@ pathman_enable_assign_hook(bool newval, void *extra) elog(DEBUG2, "pg_pathman_enable_assign_hook() [newval = %s] triggered", newval ? "true" : "false"); + if (!(newval == pathman_init_state.pg_pathman_enable && + newval == pathman_init_state.auto_partition && + newval == pathman_init_state.override_copy && + newval == pg_pathman_enable_runtimeappend && + newval == pg_pathman_enable_runtime_merge_append && + newval == pg_pathman_enable_partition_filter && + newval == pg_pathman_enable_bounds_cache)) + { + elog(NOTICE, + "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " + "and some other options have been %s", + newval ? "enabled" : "disabled"); + } + + pathman_init_state.auto_partition = newval; pathman_init_state.override_copy = newval; pg_pathman_enable_runtimeappend = newval; @@ -601,11 +616,6 @@ pathman_enable_assign_hook(bool newval, void *extra) pg_pathman_enable_partition_filter = newval; pg_pathman_enable_bounds_cache = newval; - elog(NOTICE, - "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " - "and some other options have been %s", - newval ? "enabled" : "disabled"); - /* Purge caches if pathman was disabled */ if (!newval) { From ce72bc725bed3bd7e4f3a98087908cd6123f22d7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 29 Apr 2019 13:15:21 +0300 Subject: [PATCH 412/528] Don't use wiped memory for reporting offended rel in fill_prel_with_partitions. --- src/relation_info.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index d2d95351..f2d17371 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -769,11 +769,14 @@ fill_prel_with_partitions(PartRelationInfo *prel, */ if (pbin->part_idx >= PrelChildrenCount(prel)) { + /* purged caches will destoy prel, save oid for reporting */ + Oid parent_relid = PrelParentRelid(prel); + DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " + ereport(ERROR, (errmsg("pg_pathman's cache for relation %d " "has not been properly initialized. " "Looks like one of hash partitions was dropped.", - get_rel_name_or_relid(PrelParentRelid(prel))), + parent_relid), errhint(INIT_ERROR_HINT))); } From d7cce4732a058c5fb9618577b41a8df44205bc8f Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 29 Apr 2019 17:04:48 +0300 Subject: [PATCH 413/528] Silence clang checker in build_pathman_relation_info bounds cleanup. 
---
 src/relation_info.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/relation_info.c b/src/relation_info.c
index f2d17371..2a2548d1 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -522,8 +522,11 @@ build_pathman_relation_info(Oid relid, Datum *values)
 			 */
 			if (prel->parttype == PT_HASH)
 				child = prel->children[i];
-			else if (prel->parttype == PT_RANGE)
+			else
+			{
+				Assert(prel->parttype == PT_RANGE)
 				child = prel->ranges[i].child_oid;
+			}
 
 			forget_bounds_of_partition(child);
 		}
From 2a13ed7d7f73b19807e48ee271345546a22939fa Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Mon, 29 Apr 2019 17:08:03 +0300
Subject: [PATCH 414/528] Fixup for previous blind commit.

---
 src/relation_info.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/relation_info.c b/src/relation_info.c
index 2a2548d1..d24af71d 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -524,7 +524,7 @@ build_pathman_relation_info(Oid relid, Datum *values)
 				child = prel->children[i];
 			else
 			{
-				Assert(prel->parttype == PT_RANGE)
+				Assert(prel->parttype == PT_RANGE);
 				child = prel->ranges[i].child_oid;
 			}
 
From ba302016ce1a65c23228dff64d99aa02bb12b708 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Sun, 2 Jun 2019 22:31:37 +0300
Subject: [PATCH 415/528] Add tableoid junk column to processed_tlist only for
 top-level parent.

(Note that the column is added to RelOptInfo reltarget of all nodes in
the hierarchy anyway; but only the tle with the top-level varno must get
into the *main* tlist, i.e. processed_tlist.)

Previously it was added for each parent in the tree, i.e. multiple times
in case of multi-level partitioning, leading to "variable not found in
subplan target lists" errors in setrefs.c.

As comments say, this code had better be rewritten to actually let the
parent deal with its isParent flag, and to recurse with the top-level
rc. Probably should be done if adjacent bugs arise.
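A minimal reproducer, distilled from the rowmark test added below (schema
qualification dropped for brevity):

    CREATE TABLE a1(n1 integer);
    CREATE TABLE a2(n1 integer not null, n2 integer not null);
    SELECT create_range_partitions('a2', 'n1', 1, 2, 0);
    SELECT add_range_partition('a2', 10, 20, 'a2_1020');
    SELECT create_range_partitions('a2_1020', 'n2', array[30,40],
                                   array['a2_1020_3040']);
    -- two-level hierarchy + row mark: this used to fail in setrefs.c with
    -- "variable not found in subplan target lists"
    SELECT a2.* FROM a1 JOIN a2 ON a2.n1 = a1.n1 FOR UPDATE;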
--- expected/pathman_subpartitions.out | 33 ++++++++++++++++++++++++++++++ sql/pathman_subpartitions.sql | 18 +++++++++++++++- src/include/rangeset.h | 6 ++++-- src/pg_pathman.c | 17 ++++++++++++++- 4 files changed, 70 insertions(+), 4 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 4dd5f5dd..a876b457 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -424,6 +424,39 @@ UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; DROP SCHEMA subpartitions CASCADE; NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index b790c20e..05ac9614 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -139,8 +139,24 @@ SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; SET pg_pathman.enable_partitionrouter = ON; UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; +DROP TABLE subpartitions.abc CASCADE; + + +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); + +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + +DROP TABLE subpartitions.a2 CASCADE; +DROP TABLE subpartitions.a1; -DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/rangeset.h b/src/include/rangeset.h index 96d6bc21..39db6a53 100644 --- a/src/include/rangeset.h +++ b/src/include/rangeset.h @@ -1,7 +1,6 @@ /* ------------------------------------------------------------------------ * * rangeset.h - * IndexRange functions * * Copyright (c) 2015-2016, Postgres Professional * @@ -17,7 +16,10 @@ /* - * IndexRange contains a set of selected partitions. 
+ * IndexRange is essentially a segment [lower; upper]. This module provides + * functions for efficient working (intersection, union) with Lists of + * IndexRange's; this is used for quick selection of partitions. Numbers are + * indexes of partitions in PartRelationInfo's children. */ typedef struct { /* lossy == should we use quals? */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3511a243..2cd17c6a 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -437,7 +437,7 @@ append_child_relation(PlannerInfo *root, Relation child_relation; AppendRelInfo *appinfo; Index child_rti; - PlanRowMark *child_rowmark; + PlanRowMark *child_rowmark = NULL; Node *childqual; List *childquals; ListCell *lc1, @@ -493,6 +493,10 @@ append_child_relation(PlannerInfo *root, /* Create rowmarks required for child rels */ + /* + * XXX: vanilla recurses down with *top* rowmark, not immediate parent one. + * Not sure about example where this matters though. + */ if (parent_rowmark) { child_rowmark = makeNode(PlanRowMark); @@ -511,6 +515,13 @@ append_child_relation(PlannerInfo *root, root->rowMarks = lappend(root->rowMarks, child_rowmark); /* Adjust tlist for RowMarks (see planner.c) */ + /* + * XXX Saner approach seems to + * 1) Add tle to top parent and processed_tlist once in rel_pathlist_hook. + * 2) Mark isParent = true + * *parent* knows it is parent, after all; why should child bother? + * 3) Recursion (code executed in childs) starts at 2) + */ if (!parent_rowmark->isParent && !root->parse->setOperations) { append_tle_for_rowmark(root, parent_rowmark); @@ -636,6 +647,10 @@ append_child_relation(PlannerInfo *root, if (parent_rte->relid != child_oid && child_relation->rd_rel->relhassubclass) { + /* See XXX above */ + if (child_rowmark) + child_rowmark->isParent = true; + pathman_rel_pathlist_hook(root, child_rel, child_rti, From b530684c8c6208d795ee18351f20202bf48f85f6 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 2 Jun 2019 22:46:01 +0300 Subject: [PATCH 416/528] Bump 1.5.6 lib version. 
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index fa06948c..544b130d 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.4", + "version": "1.5.6", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.5", + "version": "1.5.6", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index a9305a9e..5e72a8e1 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.5 + 1.5.6 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 25432840..8431f70d 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.5" +#define CURRENT_LIB_VERSION "1.5.6" void *pathman_cache_search_relid(HTAB *cache_table, From 43fd918a8dac582c39f4d29e573621b13f68af26 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 3 Jun 2019 12:21:30 +0300 Subject: [PATCH 417/528] Heal segfault in handle_modification_query. To encounter this, you need to parse utility query after entering planning. --- src/pg_pathman.c | 1 + src/planner_tree_modification.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 2cd17c6a..7764aa94 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1965,6 +1965,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, /* * set_append_rel_pathlist * Build access paths for an "append relation" + * Similar to PG function with the same name. * * NOTE: this function is 'public' (used in hooks.c) */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index f40c152f..4766ded1 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -516,7 +516,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) { RangeTblEntry *rte; Oid child; - Node *quals = parse->jointree->quals; + Node *quals; Index result_rti = parse->resultRelation; ParamListInfo params = context->query_params; @@ -525,6 +525,8 @@ handle_modification_query(Query *parse, transform_query_cxt *context) parse->commandType != CMD_DELETE)) return; + /* can't set earlier because CMD_UTILITY doesn't have jointree */ + quals = parse->jointree->quals; rte = rt_fetch(result_rti, parse->rtable); /* Exit if it's ONLY table */ From 9e5f1eaf22e921bed8b58b8a988d7b732c817e7f Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 7 Jun 2019 08:32:08 +0300 Subject: [PATCH 418/528] Disable pathman for SELECT FOR SHARE/UPDATE clauses on 9.5. Noticed after ba302016ce1a65 subpartitions test failed on 9.5. 
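The query class in question is a mark-for-update join against a
partitioned table, i.e. the shape of the subpartitions test:

    SELECT a2.* FROM a1 JOIN a2 ON a2.n1 = a1.n1 FOR UPDATE;

On 9.5 pathman_rel_pathlist_hook now simply bails out for a relation that
carries a row mark, so such queries fall back to vanilla inheritance
planning.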
--- src/hooks.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 854d422b..4086fabd 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -352,6 +352,15 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (root->parse->commandType != CMD_SELECT && root->parse->commandType != CMD_INSERT) return; + + /* SELECT FOR SHARE/UPDATE is not handled by above check */ + foreach(lc, root->rowMarks) + { + PlanRowMark *rc = (PlanRowMark *) lfirst(lc); + + if (rc->rti == rti) + return; + } #endif /* Skip if this table is not allowed to act as parent (e.g. FROM ONLY) */ From f093e76586f8fce4479775abed1dc003e1a63365 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 7 Jun 2019 08:36:02 +0300 Subject: [PATCH 419/528] Bump 1.5.7 lib version. --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 544b130d..5c689c9c 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.6", + "version": "1.5.7", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.6", + "version": "1.5.7", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 5e72a8e1..e7999731 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.6 + 1.5.7 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 8431f70d..e636821c 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.6" +#define CURRENT_LIB_VERSION "1.5.7" void *pathman_cache_search_relid(HTAB *cache_table, From dbcbd02e411e6acea6d97f572234746007979538 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 11 Jun 2019 17:15:13 +0300 Subject: [PATCH 420/528] Pass create_single_range_partition through SPI in spawn_partitions_val. To allow DDL-wobbling extensions (mtm, in particular) to intercept it (yeah, through targetlist analysis. Seems like this is the only place performing DDL not covered by declarative partitioning (which can be handled as utility statements). Stas Kelvich. --- range.sql | 3 +-- src/partition_creation.c | 52 ++++++++++++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/range.sql b/range.sql index 5aeaad58..ef439cee 100644 --- a/range.sql +++ b/range.sql @@ -820,8 +820,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' -LANGUAGE C -SET client_min_messages = WARNING; +LANGUAGE C; /* * Construct CHECK constraint condition for a range partition. 
diff --git a/src/partition_creation.c b/src/partition_creation.c index b41b2541..47f16ca6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -32,6 +32,7 @@ #include "commands/tablecmds.h" #include "commands/tablespace.h" #include "commands/trigger.h" +#include "executor/spi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_func.h" @@ -595,6 +596,14 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ check_lt(&cmp_value_bound_finfo, collid, value, cur_leading_bound)) { Bound bounds[2]; + int rc; + bool isnull; + char *create_sql; + HeapTuple typeTuple; + char *typname; + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name = choose_range_partition_name(parent_relid, parent_nsp); /* Assign the 'following' boundary to current 'leading' value */ cur_following_bound = cur_leading_bound; @@ -607,10 +616,45 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ bounds[0] = MakeBound(should_append ? cur_following_bound : cur_leading_bound); bounds[1] = MakeBound(should_append ? cur_leading_bound : cur_following_bound); - last_partition = create_single_range_partition_internal(parent_relid, - &bounds[0], &bounds[1], - range_bound_type, - NULL, NULL); + /* + * Instead of directly calling create_single_range_partition_internal() + * we are going to call it through SPI, to make it possible for various + * DDL-replicating extensions to catch that call and do something about + * it. --sk + */ + + /* Get typname of range_bound_type to perform cast */ + typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(range_bound_type)); + Assert(HeapTupleIsValid(typeTuple)); + typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); + ReleaseSysCache(typeTuple); + + /* Construct call to create_single_range_partition() */ + create_sql = psprintf( + "select %s.create_single_range_partition('%s.%s', '%s'::%s, '%s'::%s, '%s.%s')", + get_namespace_name(get_pathman_schema()), + parent_nsp_name, + get_rel_name(parent_relid), + IsInfinite(&bounds[0]) ? "NULL" : datum_to_cstring(bounds[0].value, range_bound_type), + typname, + IsInfinite(&bounds[1]) ? "NULL" : datum_to_cstring(bounds[1].value, range_bound_type), + typname, + parent_nsp_name, + partition_name + ); + + /* ...and call it. */ + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + rc = SPI_execute(create_sql, false, 0); + if (rc <= 0 || SPI_processed != 1) + elog(ERROR, "Failed to create range partition"); + last_partition = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 1, &isnull)); + Assert(!isnull); + SPI_finish(); + PopActiveSnapshot(); #ifdef USE_ASSERT_CHECKING elog(DEBUG2, "%s partition with following='%s' & leading='%s' [%u]", From 3fa5d08ac510e4156c6f6d7cd80b3689e211f1d2 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 13 Jun 2019 16:59:35 +0300 Subject: [PATCH 421/528] Forbid to spawn partitions using bgw from bgw spawning partitions. 
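A hypothetical way to end up here (names invented for the example): an
init callback that inserts back into its partitioned table, so spawning
one partition may require spawning another one from inside the worker:

    CREATE TABLE part_tbl(val int not null);
    SELECT create_range_partitions('part_tbl', 'val', 1, 100, 1);

    CREATE FUNCTION on_partition_created(args jsonb) RETURNS void AS $$
    BEGIN
        -- may land outside of existing partitions, i.e. needs another bgw
        INSERT INTO part_tbl VALUES (100500);
    END $$ LANGUAGE plpgsql;

    SELECT set_init_callback('part_tbl', 'on_partition_created(jsonb)');

Instead of letting the spawn worker recurse into starting more spawn
workers, this is now reported as an ERROR with a hint.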
--- src/pathman_workers.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 69f5db3b..30ecf6a2 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -84,6 +84,9 @@ static const char *spawn_partitions_bgw = "SpawnPartitionsWorker"; static const char *concurrent_part_bgw = "ConcurrentPartWorker"; +/* Used for preventing spawn bgw recursion trouble */ +static bool am_spawn_bgw = false; + /* * Estimate amount of shmem needed for concurrent partitioning. */ @@ -312,6 +315,11 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) SpawnPartitionArgs *bgw_args; Oid child_oid = InvalidOid; + if (am_spawn_bgw) + ereport(ERROR, + (errmsg("Attempt to spawn partition using bgw from bgw spawning partitions"), + errhint("Probably init_callback has INSERT to its table?"))); + /* Create a dsm segment for the worker to pass arguments */ segment = create_partitions_bg_worker_segment(relid, value, value_type); segment_handle = dsm_segment_handle(segment); @@ -363,6 +371,8 @@ bgw_main_spawn_partitions(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); + am_spawn_bgw = true; + /* Create resource owner */ CurrentResourceOwner = ResourceOwnerCreate(NULL, spawn_partitions_bgw); From e10b7d84fa2dac07c06b2c2a4b43b73c5eb6b74c Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2019 10:19:56 +0300 Subject: [PATCH 422/528] Streamline error handling in spawn partitions worker bgw. Avoid intercepting error in it. Standard bgw error handling code should be enough: it emits error and exits cleanly. --- src/include/partition_creation.h | 3 +- src/partition_creation.c | 205 +++++++++++++------------------ src/pathman_workers.c | 20 +-- 3 files changed, 99 insertions(+), 129 deletions(-) diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 63768a95..cc666923 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -24,8 +24,7 @@ /* Create RANGE partitions to store some value */ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); -Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, - bool is_background_worker); +Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); /* Create one RANGE partition */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 47f16ca6..e0cb40e0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -315,8 +315,7 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); last_partition = create_partitions_for_value_internal(relid, value, - value_type, - false); /* backend */ + value_type); } } else @@ -348,147 +347,119 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) * use create_partitions_for_value() instead. 
*/ Oid -create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, - bool is_background_worker) +create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) { MemoryContext old_mcxt = CurrentMemoryContext; Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; - PG_TRY(); + /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + PartRelationInfo *prel; + LockAcquireResult lock_result; /* could we lock the parent? */ + Oid base_bound_type; /* base type of prel->ev_type */ + Oid base_value_type; /* base type of value_type */ - /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) - { - PartRelationInfo *prel; - LockAcquireResult lock_result; /* could we lock the parent? */ - Oid base_bound_type; /* base type of prel->ev_type */ - Oid base_value_type; /* base type of value_type */ - - /* Prevent modifications of partitioning scheme */ - lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); + /* Prevent modifications of partitioning scheme */ + lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - /* Fetch PartRelationInfo by 'relid' */ - prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_RANGE); + /* Fetch PartRelationInfo by 'relid' */ + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_RANGE); - /* Fetch base types of prel->ev_type & value_type */ - base_bound_type = getBaseType(prel->ev_type); - base_value_type = getBaseType(value_type); + /* Fetch base types of prel->ev_type & value_type */ + base_bound_type = getBaseType(prel->ev_type); + base_value_type = getBaseType(value_type); - /* - * Search for a suitable partition if we didn't hold it, - * since somebody might have just created it for us. - * - * If the table is locked, it means that we've - * already failed to find a suitable partition - * and called this function to do the job. - */ - Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); - if (lock_result == LOCKACQUIRE_OK) - { - Oid *parts; - int nparts; - - /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); - - /* Shout if there's more than one */ - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - - /* It seems that we got a partition! */ - else if (nparts == 1) - { - /* Unlock the parent (we're not going to spawn) */ - UnlockRelationOid(relid, ShareUpdateExclusiveLock); + /* + * Search for a suitable partition if we didn't hold it, + * since somebody might have just created it for us. + * + * If the table is locked, it means that we've + * already failed to find a suitable partition + * and called this function to do the job. 
+ */ + Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); + if (lock_result == LOCKACQUIRE_OK) + { + Oid *parts; + int nparts; - /* Simply return the suitable partition */ - partid = parts[0]; - } + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); - /* Don't forget to free */ - pfree(parts); - } + /* Shout if there's more than one */ + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); - /* Else spawn a new one (we hold a lock on the parent) */ - if (partid == InvalidOid) + /* It seems that we got a partition! */ + else if (nparts == 1) { - RangeEntry *ranges = PrelGetRangesArray(prel); - Bound bound_min, /* absolute MIN */ - bound_max; /* absolute MAX */ + /* Unlock the parent (we're not going to spawn) */ + UnlockRelationOid(relid, ShareUpdateExclusiveLock); - Oid interval_type = InvalidOid; - Datum interval_binary, /* assigned 'width' of one partition */ - interval_text; + /* Simply return the suitable partition */ + partid = parts[0]; + } - /* Copy datums in order to protect them from cache invalidation */ - bound_min = CopyBound(&ranges[0].min, - prel->ev_byval, - prel->ev_len); + /* Don't forget to free */ + pfree(parts); + } - bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, - prel->ev_byval, - prel->ev_len); + /* Else spawn a new one (we hold a lock on the parent) */ + if (partid == InvalidOid) + { + RangeEntry *ranges = PrelGetRangesArray(prel); + Bound bound_min, /* absolute MIN */ + bound_max; /* absolute MAX */ - /* Check if interval is set */ - if (isnull[Anum_pathman_config_range_interval - 1]) - { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot spawn new partition for key '%s'", - datum_to_cstring(value, value_type)), - errdetail("default range interval is NULL"))); - } + Oid interval_type = InvalidOid; + Datum interval_binary, /* assigned 'width' of one partition */ + interval_text; - /* Retrieve interval as TEXT from tuple */ - interval_text = values[Anum_pathman_config_range_interval - 1]; + /* Copy datums in order to protect them from cache invalidation */ + bound_min = CopyBound(&ranges[0].min, + prel->ev_byval, + prel->ev_len); - /* Convert interval to binary representation */ - interval_binary = extract_binary_interval_from_text(interval_text, - base_bound_type, - &interval_type); + bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, + prel->ev_byval, + prel->ev_len); - /* At last, spawn partitions to store the value */ - partid = spawn_partitions_val(PrelParentRelid(prel), - &bound_min, &bound_max, base_bound_type, - interval_binary, interval_type, - value, base_value_type, - prel->ev_collid); + /* Check if interval is set */ + if (isnull[Anum_pathman_config_range_interval - 1]) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot spawn new partition for key '%s'", + datum_to_cstring(value, value_type)), + errdetail("default range interval is NULL"))); } - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); - } - else - elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(relid)); - } - PG_CATCH(); - { - ErrorData *error; + /* Retrieve interval as TEXT from tuple */ + interval_text = values[Anum_pathman_config_range_interval - 1]; - /* Simply rethrow ERROR if we're in backend */ - if (!is_background_worker) - PG_RE_THROW(); + /* Convert interval to binary representation */ + interval_binary = extract_binary_interval_from_text(interval_text, + base_bound_type, + &interval_type); - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); - - /* Produce log message if we're in BGW */ - elog(LOG, - CppAsString(create_partitions_for_value_internal) ": %s [%u]", - error->message, - MyProcPid); + /* At last, spawn partitions to store the value */ + partid = spawn_partitions_val(PrelParentRelid(prel), + &bound_min, &bound_max, base_bound_type, + interval_binary, interval_type, + value, base_value_type, + prel->ev_collid); + } - /* Reset 'partid' in case of error */ - partid = InvalidOid; + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); } - PG_END_TRY(); + else + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); return partid; } diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 30ecf6a2..ae6d13b9 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -364,6 +364,7 @@ bgw_main_spawn_partitions(Datum main_arg) dsm_segment *segment; SpawnPartitionArgs *args; Datum value; + Oid result; /* Establish signal handlers before unblocking signals. */ pqsignal(SIGTERM, handle_sigterm); @@ -415,18 +416,17 @@ bgw_main_spawn_partitions(Datum main_arg) DebugPrintDatum(value, args->value_type), MyProcPid); #endif - /* Create partitions and save the Oid of the last one */ - args->result = create_partitions_for_value_internal(args->partitioned_table, - value, /* unpacked Datum */ - args->value_type, - true); /* background woker */ + /* + * Create partitions and save the Oid of the last one. + * If we fail here, args->result is 0 since it is zeroed on initialization. + */ + result = create_partitions_for_value_internal(args->partitioned_table, + value, /* unpacked Datum */ + args->value_type); /* Finish transaction in an appropriate way */ - if (args->result == InvalidOid) - AbortCurrentTransaction(); - else - CommitTransactionCommand(); - + CommitTransactionCommand(); + args->result = result; dsm_detach(segment); } From 3176805567e4b19f864c5fa7909e3cb210eedd1c Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2019 11:42:06 +0300 Subject: [PATCH 423/528] Unused var warning. --- src/partition_creation.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index e0cb40e0..bea41379 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -349,7 +349,6 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) { - MemoryContext old_mcxt = CurrentMemoryContext; Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; From b75fbe71ec1095d2eebcbc5d2e2e7786408cf761 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2019 11:43:56 +0300 Subject: [PATCH 424/528] Bump 1.5.8 lib version. 
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 5c689c9c..292b29db 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.7", + "version": "1.5.8", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.7", + "version": "1.5.8", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e7999731..b167ef3e 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.7 + 1.5.8 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index e636821c..fd11047d 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.7" +#define CURRENT_LIB_VERSION "1.5.8" void *pathman_cache_search_relid(HTAB *cache_table, From c14d2ad74cbf0d2b3d5837d41fdd4f71ccc0f56b Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 19 Jul 2019 20:21:50 +0300 Subject: [PATCH 425/528] Additional mtm compatibility --- expected/pathman_basic.out | 6 +++--- sql/pathman_basic.sql | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 0ae1ae6a..95134ee2 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1312,7 +1312,7 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 10 other objects /* Test attributes copying */ -CREATE UNLOGGED TABLE test.range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL) WITH (fillfactor = 70); @@ -1328,13 +1328,13 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) DROP TABLE test.range_rel CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 6d2e52e1..9e0c3bf2 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -380,7 +380,7 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; /* Test attributes copying */ -CREATE UNLOGGED TABLE test.range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL) WITH (fillfactor = 70); From c48c2b25d6ef4f4a7064f21ff464b463dfeea769 Mon Sep 17 00:00:00 2001 From: Teodor Sigaev Date: Tue, 1 Oct 2019 17:34:56 +0300 Subject: [PATCH 426/528] PGPRO-3087 Prevent double expand partitioned table by built-in inheritance and pg_pathman's one --- expected/pathman_basic.out | 20 +++++++++++++++++++- sql/pathman_basic.sql | 8 ++++++++ src/hooks.c | 10 ++++++++-- 3 files changed, 35 
insertions(+), 3 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 95134ee2..3baf2989 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1804,7 +1804,25 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 28 other objects +NOTICE: drop cascades to 32 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 9e0c3bf2..8a97448e 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -546,6 +546,14 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; +SELECT * FROM test.mixinh_parent; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index 4086fabd..fcaab6df 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -406,10 +406,16 @@ pathman_rel_pathlist_hook(PlannerInfo *root, * and added its children to the plan. */ if (appinfo->child_relid == rti && - child_oid == parent_oid && OidIsValid(appinfo->parent_reloid)) { - goto cleanup; + if (child_oid == parent_oid) + goto cleanup; + else if (!has_pathman_relation_info(parent_oid)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not expand partitioned table \"%s\"", + get_rel_name(child_oid)), + errhint("Do not use inheritance and pg_pathman partitions together"))); } } } From 722db19a9ff5f58abb7b7394eaa58c3c00a06f2e Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Wed, 2 Oct 2019 14:39:46 +0300 Subject: [PATCH 427/528] Bump 1.5.9 lib version. 
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 292b29db..cd55fcb4 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.8", + "version": "1.5.9", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.8", + "version": "1.5.9", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index b167ef3e..35d56733 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.8 + 1.5.9 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index fd11047d..15efae16 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.8" +#define CURRENT_LIB_VERSION "1.5.9" void *pathman_cache_search_relid(HTAB *cache_table, From 27ba5db0692a8bb09f97e9b666f4631b4d5f7d4e Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 11 Oct 2019 11:17:58 +0200 Subject: [PATCH 428/528] README: remove obsolete emails --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 4fb8e5ac..03b64815 100644 --- a/README.md +++ b/README.md @@ -776,8 +776,8 @@ All sections and data will remain unchanged and will be handled by the standard Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. ## Authors -Ildar Musin Postgres Professional Ltd., Russia +[Ildar Musin](https://github.com/zilder) Alexander Korotkov Postgres Professional Ltd., Russia -Dmitry Ivanov Postgres Professional Ltd., Russia +[Dmitry Ivanov](https://github.com/funbringer) Maksim Milyutin Postgres Professional Ltd., Russia -Ildus Kurbangaliev Postgres Professional Ltd., Russia +[Ildus Kurbangaliev](https://github.com/ildus) From 3faa7ca42063f760dbb7c4071d24b90039d8220e Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 18 Nov 2019 22:52:17 +0300 Subject: [PATCH 429/528] Deprecation note. --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 03b64815..b49c20ec 100644 --- a/README.md +++ b/README.md @@ -3,14 +3,18 @@ [![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) [![GitHub license](https://img.shields.io/badge/license-PostgreSQL-blue.svg)](https://raw.githubusercontent.com/postgrespro/pg_pathman/master/LICENSE) +### NOTE: this project is not under development anymore + +`pg_pathman` supports Postgres versions [9.5..12], but most probably it won't be ported to 13 and later releases. 
[Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`; we encourage users to switch to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here.
+
 # pg_pathman
 
 The `pg_pathman` module provides optimized partitioning mechanism and functions
@@ -20,8 +24,8 @@ to manage partitions.
 
 The extension is compatible with:
 
- * PostgreSQL 9.5, 9.6, 10, 11;
- * Postgres Pro Standard 9.5, 9.6, 10;
+ * PostgreSQL 9.5, 9.6, 10, 11, 12;
+ * Postgres Pro Standard 9.5, 9.6, 10, 11, 12;
  * Postgres Pro Enterprise;
 
 Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki).
From 08113c98bee6da0173f5117a86b34aa761606963 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Thu, 14 Nov 2019 18:32:08 +0300
Subject: [PATCH 430/528] Port to 12.

Notable/non-trivial issues:

- Dealing with new tableam and abstracted slots while supporting 9.5 is
  quite hairy. Probably deserves some refactoring.

- nodeModifyTable decides to convert the tuple received from
  PartitionFilter if its tts_ops is not the one native to the table's AM
  (BufferHeapTupleTableSlot, essentially). That might sound sane, but
  nodeModifyTable doesn't know anything about our parent->child attr
  mapping and has only the parent's tupledesc, so we would end up with a
  tupledesc not matching the actual tuple. To prevent that, always
  create a BufferHeapTupleTableSlot, which (fortunately and weirdly) can
  easily store a virtual tuple as well as a materialized one. (Vanilla
  partitioning does the mapping *after* making sure tts_ops is ok.)

- For some reason which is not clear to me, nodeCustom promises that its
  tts_ops is fixed TTSOpsVirtual. RuntimeAppend doesn't think so,
  however: it easily passed a BufferHeapTupleTableSlot up when there is
  no projection, which is fine. That's changed by converting the slot to
  a virtual one even in this case, to keep the promise.

- append_rte_to_estate: for efficiency, 12 introduced
  estate->es_range_table_array mirroring es_range_table. Further,
  relcache management in the executor was centralized, and now the rri's
  relcache entries get into es_relations, where ExecEndPlan finds them
  to close. So we also fill both arrays.

- Something like core's 4b40e4: now hashtext wants to know the
  collation. We never recorded it, so just pass the default one.

- Two things led to massive duplication of test outputs:
  - Append nodes with a single subplan are eliminated now.
  - CTEs are no longer optimization fences by default.
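To illustrate the first of the two (taken from the new
pathman_array_qual_1.out below), a query pruned down to a single
partition now plans without the Append on >= 12:

    EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4);
                       QUERY PLAN
    ----------------------------------------------
     Seq Scan on test_1
       Filter: (a = ANY ('{1,2,3,4}'::integer[]))

while the pre-12 expected output keeps an Append node on top of the same
Seq Scan, hence the alternative *_1.out files.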
--- expected/pathman_array_qual.out | 4 + expected/pathman_array_qual_1.out | 2397 +++++++++++++++++++++++ expected/pathman_basic.out | 5 + expected/pathman_basic_1.out | 249 ++- expected/pathman_calamity.out | 7 + expected/pathman_calamity_1.out | 1061 ++++++++++ expected/pathman_check.out | 0 expected/pathman_cte.out | 10 +- expected/pathman_cte_1.out | 265 +++ expected/pathman_expressions.out | 3 + expected/pathman_expressions_1.out | 3 + expected/pathman_expressions_2.out | 430 ++++ expected/pathman_gaps.out | 4 + expected/pathman_gaps_1.out | 812 ++++++++ expected/pathman_join_clause.out | 4 + expected/pathman_join_clause_1.out | 176 ++ expected/pathman_only.out | 5 + expected/pathman_only_1.out | 277 +++ expected/pathman_rowmarks.out | 3 + expected/pathman_rowmarks_1.out | 3 + expected/pathman_rowmarks_2.out | 387 ++++ expected/pathman_subpartitions.out | 4 + expected/pathman_subpartitions_1.out | 460 +++++ expected/pathman_upd_del.out | 7 + expected/pathman_upd_del_1.out | 7 + expected/pathman_upd_del_2.out | 458 +++++ expected/pathman_views.out | 3 + expected/pathman_views_1.out | 3 + expected/pathman_views_2.out | 188 ++ sql/pathman_array_qual.sql | 5 + sql/pathman_basic.sql | 6 + sql/pathman_calamity.sql | 8 + sql/pathman_cte.sql | 14 +- sql/pathman_expressions.sql | 3 + sql/pathman_gaps.sql | 4 + sql/pathman_join_clause.sql | 5 +- sql/pathman_only.sql | 5 + sql/pathman_rowmarks.sql | 3 + sql/pathman_subpartitions.sql | 5 + sql/pathman_upd_del.sql | 7 + sql/pathman_views.sql | 3 + src/compat/pg_compat.c | 12 + src/hooks.c | 4 + src/include/compat/pg_compat.h | 133 +- src/include/compat/rowmarks_fix.h | 4 + src/include/partition_filter.h | 1 - src/include/planner_tree_modification.h | 2 +- src/init.c | 34 +- src/nodes_common.c | 22 +- src/partition_creation.c | 91 +- src/partition_filter.c | 79 +- src/partition_router.c | 155 +- src/pathman_workers.c | 2 +- src/pg_pathman.c | 71 +- src/pl_funcs.c | 28 +- src/pl_range_funcs.c | 10 +- src/planner_tree_modification.c | 3 + src/relation_info.c | 8 + src/runtime_merge_append.c | 6 +- src/utility_stmt_hooking.c | 57 +- 60 files changed, 7783 insertions(+), 242 deletions(-) create mode 100644 expected/pathman_array_qual_1.out create mode 100644 expected/pathman_calamity_1.out create mode 100644 expected/pathman_check.out create mode 100644 expected/pathman_cte_1.out create mode 100644 expected/pathman_expressions_2.out create mode 100644 expected/pathman_gaps_1.out create mode 100644 expected/pathman_join_clause_1.out create mode 100644 expected/pathman_only_1.out create mode 100644 expected/pathman_rowmarks_2.out create mode 100644 expected/pathman_subpartitions_1.out create mode 100644 expected/pathman_upd_del_2.out create mode 100644 expected/pathman_views_2.out diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 36ec268d..49dca03a 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_1.out b/expected/pathman_array_qual_1.out new file mode 100644 index 00000000..6c8def94 --- /dev/null +++ b/expected/pathman_array_qual_1.out @@ -0,0 +1,2397 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) + */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY ('{a,b}'::text[])) +(5 rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE 
array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) 
- pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + 
-> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) +(21 rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > 500) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_2 + Filter: (a > 101) + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 550) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > 700) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on 
test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan 
on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY 
(ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, $1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + 
Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY 
PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], 
array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > 
ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan 
on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + 
+ BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE 
q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP SCHEMA array_qual CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 3baf2989..aa5b5ab6 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out index 61aed5db..d1403c77 100644 --- a/expected/pathman_basic_1.out +++ b/expected/pathman_basic_1.out @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -251,12 +256,11 @@ SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable paren ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; - QUERY PLAN -------------------------------------- - Append - -> Seq Scan on improved_dummy_11 - Filter: (id = 101) -(3 rows) + QUERY PLAN +------------------------------- + Seq Scan on improved_dummy_11 + Filter: (id = 101) +(2 rows) SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ set_enable_parent @@ -434,20 +438,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = 2) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (2 = value) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN @@ -460,12 +462,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_3 - Filter: (2500 = id) -(3 rows) + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ QUERY PLAN @@ -537,11 +538,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* tes (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------------- - Append - -> Seq Scan on range_rel_2 -(2 rows) + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN @@ -593,20 +593,18 @@ EXPLAIN (COSTS OFF) SELECT 
* FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = 2) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (2 = value) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN @@ -619,19 +617,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN ----------------------------------------------------------------- - Append - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 - Index Cond: (2500 = id) -(3 rows) + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id = 2500) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ QUERY PLAN ---------------------------------------------------------------- Append -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 - Index Cond: (2500 < id) + Index Cond: (id > 2500) -> Seq Scan on num_range_rel_4 (4 rows) @@ -710,17 +707,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* tes ------------------------------------------------------------------------------------ Append -> Index Scan using range_rel_2_dt_idx on range_rel_2 - Index Cond: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) -> Seq Scan on range_rel_3 -> Seq Scan on range_rel_4 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------------- - Append - -> Seq Scan on range_rel_2 -(2 rows) + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN @@ -810,41 +806,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
-> Index Scan using range_rel_2_dt_idx on range_rel_2 (4 rows) -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN ---------------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Hash Join - Hash Cond: (j1.id = j2.id) - -> Hash Join - Hash Cond: (j3.id = j1.id) - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 - -> Hash - -> Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 - -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 -(20 rows) - /* * Test inlined SQL functions */ @@ -859,22 +820,20 @@ CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $ select * from test.sql_inline where id = i_id limit 1; $$ LANGUAGE sql STABLE; EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); - QUERY PLAN --------------------------------------- + QUERY PLAN +-------------------------------- Limit - -> Append - -> Seq Scan on sql_inline_0 - Filter: (id = 5) -(4 rows) + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); - QUERY PLAN --------------------------------------- + QUERY PLAN +-------------------------------- Limit - -> Append - -> Seq Scan on sql_inline_2 - Filter: (id = 1) -(4 rows) + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(3 rows) DROP FUNCTION test.sql_inline_func(int); DROP TABLE test.sql_inline CASCADE; @@ -945,12 +904,11 @@ SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_re (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - QUERY PLAN ----------------------------------------------------------------- - Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 - Index Cond: ((id >= 100) AND (id <= 700)) -(3 rows) + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); merge_range_partitions @@ -966,11 +924,10 @@ SELECT pathman.append_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_6 -(2 rows) + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_6 +(1 row) SELECT pathman.prepend_range_partition('test.num_range_rel'); prepend_range_partition @@ -979,11 +936,10 @@ SELECT pathman.prepend_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_7 -(2 rows) + QUERY PLAN 
+----------------------------- + Seq Scan on num_range_rel_7 +(1 row) SELECT pathman.drop_range_partition('test.num_range_rel_7'); drop_range_partition @@ -1049,12 +1005,11 @@ SELECT pathman.drop_range_partition('test.range_rel_7'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; - QUERY PLAN -------------------------------------------------------------------------------------- - Append - -> Index Scan using range_rel_1_dt_idx on range_rel_1 - Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) -(3 rows) + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions @@ -1347,7 +1302,7 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 10 other objects /* Test attributes copying */ -CREATE UNLOGGED TABLE test.range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL) WITH (fillfactor = 70); @@ -1363,13 +1318,13 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) DROP TABLE test.range_rel CASCADE; @@ -1390,12 +1345,11 @@ SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); INSERT INTO test.range_rel (dt) SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Append - -> Seq Scan on range_rel_14 - Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) -(3 rows) + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(2 rows) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; id | dt | data @@ -1404,12 +1358,11 @@ SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Append - -> Seq Scan on range_rel_8 - Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) -(3 rows) + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(2 rows) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; id | dt | data @@ -1839,7 +1792,25 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT 
create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1);
+ create_range_partitions
+-------------------------
+ 1
+(1 row)
+
+INSERT INTO test.mixinh_child1 VALUES (1);
+SELECT * FROM test.mixinh_child1;
+ id
+----
+ 1
+(1 row)
+
+SELECT * FROM test.mixinh_parent;
+ERROR: could not expand partitioned table "mixinh_child1"
 DROP SCHEMA test CASCADE;
-NOTICE: drop cascades to 28 other objects
+NOTICE: drop cascades to 32 other objects
 DROP EXTENSION pg_pathman CASCADE;
 DROP SCHEMA pathman CASCADE;
diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out
index 35d56733..c258b5cc 100644
--- a/expected/pathman_calamity.out
+++ b/expected/pathman_calamity.out
@@ -1,3 +1,10 @@
+/*
+ * pathman_calamity.out and pathman_calamity_1.out differ only in that since
+ * 12 we get
+ * ERROR: invalid input syntax for type integer: "15.6"
+ * instead of
+ * ERROR: invalid input syntax for integer: "15.6"
+ */
 \set VERBOSITY terse
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out
new file mode 100644
index 00000000..ee422784
--- /dev/null
+++ b/expected/pathman_calamity_1.out
@@ -0,0 +1,1061 @@
+/*
+ * pathman_calamity.out and pathman_calamity_1.out differ only in that since
+ * 12 we get
+ * ERROR: invalid input syntax for type integer: "15.6"
+ * instead of
+ * ERROR: invalid input syntax for integer: "15.6"
+ */
+\set VERBOSITY terse
+SET search_path = 'public';
+CREATE EXTENSION pg_pathman;
+CREATE SCHEMA calamity;
+/* call for coverage test */
+set client_min_messages = ERROR;
+SELECT debug_capture();
+ debug_capture
+---------------
+ 
+(1 row)
+
+SELECT pathman_version();
+ pathman_version
+-----------------
+ 1.5.9
+(1 row)
+
+set client_min_messages = NOTICE;
+/* create table to be partitioned */
+CREATE TABLE calamity.part_test(val serial);
+/* test pg_pathman's cache */
+INSERT INTO calamity.part_test SELECT generate_series(1, 30);
+SELECT create_range_partitions('calamity.part_test', 'val', 1, 10);
+ create_range_partitions
+-------------------------
+ 3
+(1 row)
+
+SELECT drop_partitions('calamity.part_test');
+NOTICE: 10 rows copied from calamity.part_test_1
+NOTICE: 10 rows copied from calamity.part_test_2
+NOTICE: 10 rows copied from calamity.part_test_3
+ drop_partitions
+-----------------
+ 3
+(1 row)
+
+SELECT create_range_partitions('calamity.part_test', 'val', 1, 10);
+ create_range_partitions
+-------------------------
+ 3
+(1 row)
+
+SELECT drop_partitions('calamity.part_test');
+NOTICE: 10 rows copied from calamity.part_test_1
+NOTICE: 10 rows copied from calamity.part_test_2
+NOTICE: 10 rows copied from calamity.part_test_3
+ drop_partitions
+-----------------
+ 3
+(1 row)
+
+SELECT create_range_partitions('calamity.part_test', 'val', 1, 10);
+ create_range_partitions
+-------------------------
+ 3
+(1 row)
+
+SELECT append_range_partition('calamity.part_test');
+ append_range_partition
+------------------------
+ calamity.part_test_4
+(1 row)
+
+SELECT drop_partitions('calamity.part_test');
+NOTICE: 10 rows copied from calamity.part_test_1
+NOTICE: 10 rows copied from calamity.part_test_2
+NOTICE: 10 rows copied from calamity.part_test_3
+NOTICE: 0 rows copied from calamity.part_test_4
+ drop_partitions
+-----------------
+ 4
+(1 row)
+
+SELECT create_range_partitions('calamity.part_test', 'val', 1, 10);
+ create_range_partitions
+-------------------------
+ 3
+(1 row)
+
+SELECT append_range_partition('calamity.part_test');
+ append_range_partition
+------------------------
+ 
calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + 
add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: relation "1" has no partitions +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: relation "pg_class" has no partitions +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + 
+SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: relation "0" does not exist +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 
'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT 
get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on 
test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; diff --git 
a/expected/pathman_check.out b/expected/pathman_check.out new file mode 100644 index 00000000..e69de29b diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out index c7edd5a4..ce818a36 100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -1,10 +1,14 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_cte; -/* - * Test simple CTE queries - */ CREATE TABLE test_cte.range_rel ( id INT4, dt TIMESTAMP NOT NULL, diff --git a/expected/pathman_cte_1.out b/expected/pathman_cte_1.out new file mode 100644 index 00000000..70a9ee88 --- /dev/null +++ b/expected/pathman_cte_1.out @@ -0,0 +1,265 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ 
+/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/
+SELECT set_enable_parent('test_cte.cte_del_xacts', false);
+ set_enable_parent 
+-------------------
+ 
+(1 row)
+
+EXPLAIN (COSTS OFF)
+WITH tmp AS (
+	SELECT tid, test_mode, regtime::DATE AS pdate, state_code
+	FROM test_cte.cte_del_xacts_specdata)
+DELETE FROM test_cte.cte_del_xacts t USING tmp
+WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0;
+                                                    QUERY PLAN                                                     
+-----------------------------------------------------------------------------------------------------------------
+ Delete on cte_del_xacts_1 t
+   ->  Hash Join
+         Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date))
+         ->  Seq Scan on cte_del_xacts_1 t
+         ->  Hash
+               ->  Seq Scan on cte_del_xacts_specdata
+                     Filter: (test_mode > 0)
+(7 rows)
+
+/* create stub pl/PgSQL function */
+CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT)
+RETURNS smallint AS
+$$
+begin
+	return 2::smallint;
+end
+$$
+LANGUAGE plpgsql STABLE;
+/* test subquery planning */
+WITH tmp AS (
+	SELECT tid FROM test_cte.cte_del_xacts_specdata
+	WHERE state_code != test_cte.cte_del_xacts_stab('test'))
+SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid;
+ id |   pdate    | tid 
+----+------------+-----
+  1 | 01-01-2016 |   1
+(1 row)
+
+/* test subquery planning (one more time) */
+WITH tmp AS (
+	SELECT tid FROM test_cte.cte_del_xacts_specdata
+	WHERE state_code != test_cte.cte_del_xacts_stab('test'))
+SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid;
+ id |   pdate    | tid 
+----+------------+-----
+  1 | 01-01-2016 |   1
+(1 row)
+
+DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT);
+DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE;
+NOTICE: drop cascades to 2 other objects
+/* Test recursive CTE */
+CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL);
+SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2);
+ create_hash_partitions 
+------------------------
+                      2
+(1 row)
+
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name)
+SELECT id, 'name'||id FROM generate_series(1,100) f(id);
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name)
+SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id);
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name)
+SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id);
+SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5;
+ id | name  
+----+-------
+  5 | name5
+  5 | name6
+  5 | name7
+(3 rows)
+
+WITH RECURSIVE test AS (
+	SELECT min(name) AS name
+	FROM test_cte.recursive_cte_test_tbl
+	WHERE id = 5
+	UNION ALL
+	SELECT (SELECT min(name)
+			FROM test_cte.recursive_cte_test_tbl
+			WHERE id = 5 AND name > test.name)
+	FROM test
+	WHERE name IS NOT NULL)
+SELECT * FROM test;
+ name  
+-------
+ name5
+ name6
+ name7
+ 
+(4 rows)
+
+DROP SCHEMA test_cte CASCADE;
+NOTICE: drop cascades to 3 other objects
+DROP EXTENSION pg_pathman;
diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out
index 66f931e3..1db38acb 100644
--- a/expected/pathman_expressions.out
+++ b/expected/pathman_expressions.out
@@ -3,6 +3,9 @@
  * NOTE: This test behaves differently on < 11 because planner now turns
  * Row(Const, Const) into just Const of record type, apparently since 3decd150
  * -------------------------------------------
+ *
+ * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
+ * causing different output; pathman_expressions_2.out is the updated version.
 */
 \set VERBOSITY terse
 SET search_path = 'public';
diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out
index 893bcd21..126534a0 100644
--- a/expected/pathman_expressions_1.out
+++ b/expected/pathman_expressions_1.out
@@ -3,6 +3,9 @@
  * NOTE: This test behaves differently on < 11 because planner now turns
  * Row(Const, Const) into just Const of record type, apparently since 3decd150
  * -------------------------------------------
+ *
+ * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
+ * causing different output; pathman_expressions_2.out is the updated version.
 */
 \set VERBOSITY terse
 SET search_path = 'public';
diff --git a/expected/pathman_expressions_2.out b/expected/pathman_expressions_2.out
new file mode 100644
index 00000000..83b0c7b0
--- /dev/null
+++ b/expected/pathman_expressions_2.out
@@ -0,0 +1,430 @@
+/*
+ * -------------------------------------------
+ * NOTE: This test behaves differently on < 11 because planner now turns
+ * Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ *
+ * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
+ * causing different output; pathman_expressions_2.out is the updated version.
+ */
+\set VERBOSITY terse
+SET search_path = 'public';
+CREATE EXTENSION pg_pathman;
+CREATE SCHEMA test_exprs;
+/*
+ * Test partitioning expression canonicalization process
+ */
+CREATE TABLE test_exprs.canon(c JSONB NOT NULL);
+SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2);
+ create_range_partitions 
+-------------------------
+                       2
+(1 row)
+
+SELECT expr FROM pathman_config; /* check expression */
+             expr              
+-------------------------------
+ ((c ->> 'key'::text))::bigint
+(1 row)
+
+INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }');
+SELECT *, tableoid::REGCLASS FROM test_exprs.canon;
+           c            |      tableoid      
+------------------------+--------------------
+ {"key": 2, "value": 0} | test_exprs.canon_1
+(1 row)
+
+DROP TABLE test_exprs.canon CASCADE;
+NOTICE: drop cascades to 3 other objects
+CREATE TABLE test_exprs.canon(val TEXT NOT NULL);
+CREATE SEQUENCE test_exprs.canon_seq;
+SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL);
+ add_to_pathman_config 
+-----------------------
+ t
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b');
+ add_range_partition 
+---------------------
+ test_exprs.canon_1
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c');
+ add_range_partition 
+---------------------
+ test_exprs.canon_2
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd');
+ add_range_partition 
+---------------------
+ test_exprs.canon_3
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e');
+ add_range_partition 
+---------------------
+ test_exprs.canon_4
+(1 row)
+
+SELECT expr FROM pathman_config; /* check expression */
+       expr        
+-------------------
+ (val COLLATE "C")
+(1 row)
+
+INSERT INTO test_exprs.canon VALUES ('b');
+SELECT *, tableoid::REGCLASS FROM test_exprs.canon;
+ val |      tableoid      
+-----+--------------------
+ b   | test_exprs.canon_2
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']);
+     QUERY PLAN      
+---------------------
+ Seq Scan on canon_1
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']);
+                         QUERY PLAN                          
+----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite 
CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL 
statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be 
marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP SCHEMA test_exprs CASCADE; +NOTICE: drop cascades to 24 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out index a21734f0..1d9b1f33 100644 --- a/expected/pathman_gaps.out +++ b/expected/pathman_gaps.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_1.out b/expected/pathman_gaps_1.out new file mode 100644 index 00000000..d6e1973d --- /dev/null +++ b/expected/pathman_gaps_1.out @@ -0,0 +1,812 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN 
+----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------- + Seq Scan on test_2_4 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- 
+ Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------- + Seq Scan on test_3_5 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN 
(COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------- + Seq Scan on test_4_6 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + 
->  Seq Scan on test_4_11
+(7 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41;
+          QUERY PLAN          
+-----------------------------
+ Append
+   ->  Seq Scan on test_4_6
+   ->  Seq Scan on test_4_7
+   ->  Seq Scan on test_4_8
+   ->  Seq Scan on test_4_9
+   ->  Seq Scan on test_4_10
+   ->  Seq Scan on test_4_11
+(7 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46;
+          QUERY PLAN          
+-----------------------------
+ Append
+   ->  Seq Scan on test_4_6
+   ->  Seq Scan on test_4_7
+   ->  Seq Scan on test_4_8
+   ->  Seq Scan on test_4_9
+   ->  Seq Scan on test_4_10
+   ->  Seq Scan on test_4_11
+(7 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46;
+          QUERY PLAN          
+-----------------------------
+ Append
+   ->  Seq Scan on test_4_6
+   ->  Seq Scan on test_4_7
+   ->  Seq Scan on test_4_8
+   ->  Seq Scan on test_4_9
+   ->  Seq Scan on test_4_10
+   ->  Seq Scan on test_4_11
+(7 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51;
+          QUERY PLAN          
+-----------------------------
+ Append
+   ->  Seq Scan on test_4_6
+         Filter: (val > 51)
+   ->  Seq Scan on test_4_7
+   ->  Seq Scan on test_4_8
+   ->  Seq Scan on test_4_9
+   ->  Seq Scan on test_4_10
+   ->  Seq Scan on test_4_11
+(8 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51;
+          QUERY PLAN          
+-----------------------------
+ Append
+   ->  Seq Scan on test_4_6
+   ->  Seq Scan on test_4_7
+   ->  Seq Scan on test_4_8
+   ->  Seq Scan on test_4_9
+   ->  Seq Scan on test_4_10
+   ->  Seq Scan on test_4_11
+(7 rows)
+
+DROP SCHEMA gaps CASCADE;
+NOTICE: drop cascades to 30 other objects
+DROP EXTENSION pg_pathman;
diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out
index 25d5cba9..ed822543 100644
--- a/expected/pathman_join_clause.out
+++ b/expected/pathman_join_clause.out
@@ -1,3 +1,7 @@
+/*
+ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
+ * causing different output; pathman_join_clause_1.out is the updated version.
+ */
 \set VERBOSITY terse
 SET search_path = 'public';
 CREATE SCHEMA pathman;
diff --git a/expected/pathman_join_clause_1.out b/expected/pathman_join_clause_1.out
new file mode 100644
index 00000000..09b9a00c
--- /dev/null
+++ b/expected/pathman_join_clause_1.out
@@ -0,0 +1,176 @@
+/*
+ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
+ * causing different output; pathman_join_clause_1.out is the updated version.
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), 
(3), (3);
+INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3);
+INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3);
+SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2);
+ create_hash_partitions 
+------------------------
+                      2
+(1 row)
+
+/* gather statistics on test tables to have deterministic plans */
+ANALYZE;
+/* Query #1 */
+EXPLAIN (COSTS OFF) SELECT * FROM test.parent
+LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND
+		test.child.owner_id = test.parent.owner_id
+WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4);
+                                              QUERY PLAN                                               
+-----------------------------------------------------------------------------------------------------
+ Nested Loop Left Join
+   ->  Seq Scan on parent
+         Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3))
+   ->  Custom Scan (RuntimeAppend)
+         Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id))
+         ->  Seq Scan on child_1 child
+               Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id))
+(7 rows)
+
+SELECT * FROM test.parent
+LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND
+		test.child.owner_id = test.parent.owner_id
+WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4);
+ id | owner_id | parent_id | owner_id 
+----+----------+-----------+----------
+  3 |        3 |         3 |        3
+  4 |        3 |           |         
+(2 rows)
+
+/* Query #2 */
+EXPLAIN (COSTS OFF) SELECT * FROM test.parent
+LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND
+		test.child.owner_id = 3
+WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4);
+                              QUERY PLAN                              
+----------------------------------------------------------------------
+ Nested Loop Left Join
+   Join Filter: (child_1.parent_id = parent.id)
+   ->  Seq Scan on parent
+         Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3))
+   ->  Seq Scan on child_1
+         Filter: (owner_id = 3)
+(6 rows)
+
+SELECT * FROM test.parent
+LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND
+		test.child.owner_id = 3
+WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4);
+ id | owner_id | parent_id | owner_id 
+----+----------+-----------+----------
+  3 |        3 |         3 |        3
+  4 |        3 |           |         
+(2 rows)
+
+DROP SCHEMA test CASCADE;
+NOTICE: drop cascades to 15 other objects
+DROP EXTENSION pg_pathman CASCADE;
+DROP SCHEMA pathman CASCADE;
diff --git a/expected/pathman_only.out b/expected/pathman_only.out
index 28471cf3..b54722d8 100644
--- a/expected/pathman_only.out
+++ b/expected/pathman_only.out
@@ -2,6 +2,11 @@
  * ---------------------------------------------
  * NOTE: This test behaves differently on PgPro
  * ---------------------------------------------
+ *
+ * Since 12 (608b167f9f), CTEs which are scanned once are no longer an
+ * optimization fence, which changes practically all plans here. There is
+ * an option to forcibly make them MATERIALIZED, but we also need to run tests
+ * on older versions, so create pathman_only_1.out instead.
 */
 \set VERBOSITY terse
 SET search_path = 'public';
diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out
new file mode 100644
index 00000000..fe64e5c9
--- /dev/null
+++ b/expected/pathman_only_1.out
@@ -0,0 +1,277 @@
+/*
+ * ---------------------------------------------
+ * NOTE: This test behaves differently on PgPro
+ * ---------------------------------------------
+ *
+ * Since 12 (608b167f9f), CTEs which are scanned once are no longer an
+ * optimization fence, which changes practically all plans here. 
There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on 
from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: 
(from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP SCHEMA test_only CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 0bf1078a..4b51cb65 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index d072cde9..e72e7076 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. 
*/ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out new file mode 100644 index 00000000..a111d688 --- /dev/null +++ b/expected/pathman_rowmarks_2.out @@ -0,0 +1,387 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN 
+----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP SCHEMA rowmarks CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table rowmarks.first +drop cascades to table rowmarks.second +drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions.out 
b/expected/pathman_subpartitions.out index a876b457..c13b4ee8 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. + */ \set VERBOSITY terse CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out new file mode 100644 index 00000000..f190f798 --- /dev/null +++ b/expected/pathman_subpartitions_1.out @@ -0,0 +1,460 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. + */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max 
+---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------- + Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +---------------------- + Seq Scan on abc_3_2 + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT 
NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + 
append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT 
create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP SCHEMA subpartitions CASCADE; +NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 935b65b4..2cc19239 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -2,6 +2,13 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index d0022855..5cd5ac9f 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -2,6 +2,13 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out new file mode 100644 index 00000000..2aeb6702 --- /dev/null +++ b/expected/pathman_upd_del_2.out @@ -0,0 +1,458 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN 
+-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) 
+ Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on 
range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 27 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 45423ef5..78589970 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index bead6de1..ea390d84 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_views_2.out b/expected/pathman_views_2.out new file mode 100644 index 00000000..15770ec0 --- /dev/null +++ b/expected/pathman_views_2.out @@ -0,0 +1,188 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------- + Seq Scan on _abc_0 + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------- + LockRows + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 
+ -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +---------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP SCHEMA views CASCADE; +NOTICE: drop cascades to 16 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 7ab15b6a..84327359 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 8a97448e..a164d421 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -1,3 +1,9 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 51827887..c380ea1d 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -1,3 +1,11 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + */ + \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/sql/pathman_cte.sql b/sql/pathman_cte.sql index 04af82f0..5a695cbb 100644 --- a/sql/pathman_cte.sql +++ b/sql/pathman_cte.sql @@ -1,15 +1,17 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ + \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_cte; - - -/* - * Test simple CTE queries - */ - CREATE TABLE test_cte.range_rel ( id INT4, dt TIMESTAMP NOT NULL, diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 6149a0c2..ed05be79 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -3,6 +3,9 @@ * NOTE: This test behaves differenly on < 11 because planner now turns * Row(Const, Const) into just Const of record type, apparently since 3decd150 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. */ \set VERBOSITY terse diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql index eb185ff2..55c9a16d 100644 --- a/sql/pathman_gaps.sql +++ b/sql/pathman_gaps.sql @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index c578d361..3a0a655f 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -105,4 +109,3 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; - diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index e2813ea6..6e34a9c1 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -2,6 +2,11 @@ * --------------------------------------------- * NOTE: This test behaves differenly on PgPro * --------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. */ \set VERBOSITY terse diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index aa365544..f1ac0fe9 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 05ac9614..5aaea49a 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ + \set VERBOSITY terse CREATE EXTENSION pg_pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index adca1e4c..a6cab581 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -2,6 +2,13 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. */ \set VERBOSITY terse diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql index 9f386a3d..65e64149 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. */ \set VERBOSITY terse diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 4bc021fd..abf71f9d 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -145,8 +145,13 @@ get_all_actual_clauses(List *restrictinfo_list) * make_restrictinfos_from_actual_clauses */ #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" +#else #include "optimizer/restrictinfo.h" #include "optimizer/var.h" +#endif /* 12 */ List * make_restrictinfos_from_actual_clauses(PlannerInfo *root, @@ -462,6 +467,13 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ return; #endif + +#if PG_VERSION_NUM >= 120000 + case RTE_RESULT: + /* RESULT RTEs, in themselves, are no problem. 
*/ + break; +#endif /* 12 */ + } /* diff --git a/src/hooks.c b/src/hooks.c index fcaab6df..12c053b2 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -13,6 +13,10 @@ #include "compat/pg_compat.h" #include "compat/rowmarks_fix.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif + #include "declarative.h" #include "hooks.h" #include "init.h" diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 145b2113..26931fd9 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -26,8 +26,15 @@ #include "commands/trigger.h" #include "executor/executor.h" #include "nodes/memnodes.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/pathnodes.h" +#else #include "nodes/relation.h" +#endif #include "nodes/pg_list.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/appendinfo.h" +#endif #include "optimizer/cost.h" #include "optimizer/paths.h" #include "optimizer/pathnode.h" @@ -232,7 +239,20 @@ /* * create_append_path() */ -#if PG_VERSION_NUM >= 110000 +#if PG_VERSION_NUM >= 120000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#else +/* TODO pgpro version */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false, NIL) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 110000 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ @@ -794,11 +814,14 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * MakeTupleTableSlot() */ -#if PG_VERSION_NUM >= 110000 -#define MakeTupleTableSlotCompat() \ +#if PG_VERSION_NUM >= 120000 +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot(NULL, (tts_ops)) +#elif PG_VERSION_NUM >= 110000 +#define MakeTupleTableSlotCompat(tts_ops) \ MakeTupleTableSlot(NULL) #else -#define MakeTupleTableSlotCompat() \ +#define MakeTupleTableSlotCompat(tts_ops) \ MakeTupleTableSlot() #endif @@ -877,14 +900,113 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, # define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) #endif +/* + * is_andclause + */ +#if PG_VERSION_NUM >= 120000 +#define is_andclause_compat(clause) is_andclause(clause) +#else +#define is_andclause_compat(clause) and_clause(clause) +#endif + +/* + * GetDefaultTablespace + */ +#if PG_VERSION_NUM >= 120000 +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence), (partitioned)) +#else +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence)) +#endif + +/* + * CreateTemplateTupleDesc + */ +#if PG_VERSION_NUM >= 120000 +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc(natts) +#else +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc((natts), (hasoid)) +#endif + +/* + * addRangeTableEntryForRelation + */ +#if PG_VERSION_NUM >= 120000 +#define addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (lockmode), (alias), (inh), (inFromCl)) +#else +#define addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (alias), (inh), 
(inFromCl)) +#endif + +/* + * NextCopyFrom (WITH_OIDS removed) + */ +#if PG_VERSION_NUM >= 120000 +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls)) +#else +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls), (tupleOid)) +#endif + +/* + * ExecInsertIndexTuples. Since 12, the slot carries the tupleid. + */ +#if PG_VERSION_NUM >= 120000 +#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ + ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#else +#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ + ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#endif + +/* + * RenameRelationInternal + */ +#if PG_VERSION_NUM >= 120000 +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal), (is_index)) +#else +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal)) +#endif + +/* + * getrelid + */ +#if PG_VERSION_NUM >= 120000 +#define getrelid(rangeindex,rangetable) \ + (rt_fetch(rangeindex, rangetable)->relid) +#endif + +/* + * AddRelationNewConstraints + */ +#if PG_VERSION_NUM >= 120000 +#define AddRelationNewConstraintsCompat(rel, newColDefaults, newConstraints, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), (newColDefaults), (newConstraints), (allow_merge), (is_local), (is_internal), NULL) +#else +#define AddRelationNewConstraintsCompat(rel, newColDefaults, newConstraints, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), (newColDefaults), (newConstraints), (allow_merge), (is_local), (is_internal)) +#endif + + /* * ------------- * Common code * ------------- */ +#if PG_VERSION_NUM >= 120000 +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlot((estate), (tdesc), (tts_ops)) +#else +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlotCompatHorse((estate), (tdesc)) static inline TupleTableSlot * -ExecInitExtraTupleSlotCompat(EState *s, TupleDesc t) +ExecInitExtraTupleSlotCompatHorse(EState *s, TupleDesc t) { #if PG_VERSION_NUM >= 110000 return ExecInitExtraTupleSlot(s,t); @@ -896,6 +1018,7 @@ ExecInitExtraTupleSlotCompat(EState *s, TupleDesc t) return res; #endif } +#endif /* See ExecEvalParamExtern() */ static inline ParamExternData * diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index 4875358e..09e5fbef 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -17,7 +17,11 @@ #include "postgres.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" +#if PG_VERSION_NUM < 120000 #include "nodes/relation.h" +#else +#include "optimizer/optimizer.h" +#endif #if PG_VERSION_NUM >= 90600 diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index bf03433c..0b32e575 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -90,7 +90,6 @@ struct ResultPartsStorage bool close_relations; LOCKMODE head_open_lock_mode; - LOCKMODE heap_close_lock_mode; PartRelationInfo *prel; ExprState *prel_expr_state; diff --git a/src/include/planner_tree_modification.h 
b/src/include/planner_tree_modification.h index 4e33ca34..edca73a0 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -16,7 +16,7 @@ #include "postgres.h" #include "utils/rel.h" -#include "nodes/relation.h" +/* #include "nodes/relation.h" */ #include "nodes/nodeFuncs.h" diff --git a/src/init.c b/src/init.c index 92d2d213..bd85c593 100644 --- a/src/init.c +++ b/src/init.c @@ -21,12 +21,20 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/heapam.h" +#include "access/genam.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "catalog/indexing.h" #include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/nodeFuncs.h" +#endif #include "optimizer/clauses.h" #include "utils/inval.h" #include "utils/builtins.h" @@ -631,7 +639,11 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, TransactionId *xmin, ItemPointerData* iptr) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -653,7 +665,11 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif while ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -681,7 +697,11 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); @@ -699,7 +719,11 @@ bool read_pathman_params(Oid relid, Datum *values, bool *isnull) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -712,7 +736,11 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) rel = heap_open(get_pathman_config_params_relid(false), AccessShareLock); snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif /* There should be just 1 row */ if ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) @@ -730,7 +758,11 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); @@ -764,7 +796,7 @@ validate_range_constraint(const Expr *expr, tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); /* Is it an AND clause? 
*/ - if (and_clause((Node *) expr)) + if (is_andclause_compat((Node *) expr)) { const BoolExpr *boolexpr = (const BoolExpr *) expr; ListCell *lc; diff --git a/src/nodes_common.c b/src/nodes_common.c index 5f0c0c14..8adf81dd 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -15,9 +15,13 @@ #include "utils.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/clauses.h" -#include "optimizer/tlist.h" #include "optimizer/var.h" +#endif +#include "optimizer/tlist.h" #include "rewrite/rewriteManip.h" #include "utils/memutils.h" #include "utils/ruleutils.h" @@ -689,11 +693,25 @@ exec_append_common(CustomScanState *node, return NULL; if (!node->ss.ps.ps_ProjInfo) + { + /* + * ExecInitCustomScan carelessly promises that it will always (resultopsfixed) + * return a TTSOpsVirtual slot. To keep the promise, convert raw + * BufferHeapTupleSlot to virtual even if we don't have any projection. + * + * BTW, why did the original code invent its own scan_state->slot + * instead of using ss.ss_ScanTupleSlot? + */ +#if PG_VERSION_NUM >= 120000 + return ExecCopySlot(node->ss.ps.ps_ResultTupleSlot, scan_state->slot); +#else return scan_state->slot; +#endif + } /* * Assuming that current projection doesn't involve SRF. - * NOTE: Any SFR functions are evaluated in ProjectSet node. + * NOTE: Any SRF functions since 69f4b9c are evaluated in ProjectSet node. */ ResetExprContext(node->ss.ps.ps_ExprContext); node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; diff --git a/src/partition_creation.c b/src/partition_creation.c index bea41379..e162e99e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -19,6 +19,9 @@ #include "access/htup_details.h" #include "access/reloptions.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/heap.h" #include "catalog/pg_authid.h" @@ -245,9 +248,9 @@ create_single_partition_common(Oid parent_relid, /* Open the relation and add new check constraint & fkeys */ child_relation = heap_open(partition_relid, AccessExclusiveLock); - AddRelationNewConstraints(child_relation, NIL, - list_make1(check_constraint), - false, true, true); + AddRelationNewConstraintsCompat(child_relation, NIL, + list_make1(check_constraint), + false, true, true); heap_close(child_relation, NoLock); /* Make constraint visible */ @@ -809,6 +812,9 @@ create_single_partition_internal(Oid parent_relid, #if defined(PGPRO_EE) && PG_VERSION_NUM < 100000 create_stmt.partition_info = NULL; #endif +#if PG_VERSION_NUM >= 120000 + create_stmt.accessMethod = NULL; +#endif /* Obtain the sequence of Stmts to create partition and link it to parent */ create_stmts = transformCreateStmt(&create_stmt, NULL); @@ -986,7 +992,11 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) /* Search for 'partition_relid' */ ScanKeyInit(&skey[0], +#if PG_VERSION_NUM >= 120000 + Anum_pg_class_oid, +#else ObjectIdAttributeNumber, +#endif BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partition_relid)); @@ -1135,7 +1145,12 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) Oid copy_fkeys_proc_args[] = { REGCLASSOID, REGCLASSOID }; List *copy_fkeys_proc_name; FmgrInfo copy_fkeys_proc_flinfo; - FunctionCallInfoData copy_fkeys_proc_fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(copy_fkeys_proc_fcinfo, 2); +#else + FunctionCallInfoData copy_fkeys_proc_fcinfo_data; + FunctionCallInfo copy_fkeys_proc_fcinfo = 
&copy_fkeys_proc_fcinfo_data; +#endif char *pathman_schema; /* Fetch pg_pathman's schema */ @@ -1150,15 +1165,22 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) copy_fkeys_proc_args, false), &copy_fkeys_proc_flinfo); - InitFunctionCallInfoData(copy_fkeys_proc_fcinfo, &copy_fkeys_proc_flinfo, + InitFunctionCallInfoData(*copy_fkeys_proc_fcinfo, &copy_fkeys_proc_flinfo, 2, InvalidOid, NULL, NULL); - copy_fkeys_proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); - copy_fkeys_proc_fcinfo.argnull[0] = false; - copy_fkeys_proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_oid); - copy_fkeys_proc_fcinfo.argnull[1] = false; +#if PG_VERSION_NUM >= 120000 + copy_fkeys_proc_fcinfo->args[0].value = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo->args[0].isnull = false; + copy_fkeys_proc_fcinfo->args[1].value = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->args[1].isnull = false; +#else + copy_fkeys_proc_fcinfo->arg[0] = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo->argnull[0] = false; + copy_fkeys_proc_fcinfo->arg[1] = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->argnull[1] = false; +#endif /* Invoke the callback */ - FunctionCallInvoke(&copy_fkeys_proc_fcinfo); + FunctionCallInvoke(copy_fkeys_proc_fcinfo); /* Make changes visible */ CommandCounterIncrement(); @@ -1266,9 +1288,9 @@ add_pathman_check_constraint(Oid relid, Constraint *constraint) { Relation part_rel = heap_open(relid, AccessExclusiveLock); - AddRelationNewConstraints(part_rel, NIL, - list_make1(constraint), - false, true, true); + AddRelationNewConstraintsCompat(part_rel, NIL, + list_make1(constraint), + false, true, true); heap_close(part_rel, NoLock); } @@ -1629,7 +1651,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) Oid partition_oid = cb_params->partition_relid; FmgrInfo cb_flinfo; - FunctionCallInfoData cb_fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(cb_fcinfo, 1); +#else + FunctionCallInfoData cb_fcinfo_data; + FunctionCallInfo cb_fcinfo = &cb_fcinfo_data; +#endif JsonbParseState *jsonb_state = NULL; JsonbValue *result, @@ -1761,12 +1788,17 @@ invoke_init_callback_internal(init_callback_params *cb_params) /* Fetch function call data */ fmgr_info(cb_params->callback, &cb_flinfo); - InitFunctionCallInfoData(cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); - cb_fcinfo.arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); - cb_fcinfo.argnull[0] = false; + InitFunctionCallInfoData(*cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); +#if PG_VERSION_NUM >= 120000 + cb_fcinfo->args[0].value = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->args[0].isnull = false; +#else + cb_fcinfo->arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->argnull[0] = false; +#endif /* Invoke the callback */ - FunctionCallInvoke(&cb_fcinfo); + FunctionCallInvoke(cb_fcinfo); } /* Invoke a callback of a specified type */ @@ -1830,19 +1862,28 @@ validate_part_callback(Oid procid, bool emit_error) static Oid text_to_regprocedure(text *proc_signature) { - FunctionCallInfoData fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(fcinfo, 1); +#else + FunctionCallInfoData fcinfo_data; + FunctionCallInfo fcinfo = &fcinfo_data; +#endif Datum result; - InitFunctionCallInfoData(fcinfo, NULL, 1, InvalidOid, NULL, NULL); + InitFunctionCallInfoData(*fcinfo, NULL, 1, InvalidOid, NULL, NULL); -#if PG_VERSION_NUM >= 90600 - fcinfo.arg[0] = PointerGetDatum(proc_signature); +#if PG_VERSION_NUM >= 120000 + fcinfo->args[0].value = PointerGetDatum(proc_signature); + fcinfo->args[0].isnull = 
false; +#elif PG_VERSION_NUM >= 90600 + fcinfo->arg[0] = PointerGetDatum(proc_signature); + fcinfo->argnull[0] = false; #else - fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); + fcinfo->arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); + fcinfo->argnull[0] = false; #endif - fcinfo.argnull[0] = false; - result = to_regprocedure(&fcinfo); + result = to_regprocedure(fcinfo); return DatumGetObjectId(result); } diff --git a/src/partition_filter.c b/src/partition_filter.c index f905470e..a923c650 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -17,6 +17,9 @@ #include "utils.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" @@ -86,7 +89,7 @@ static void prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, static Node *fix_returning_list_mutator(Node *node, void *state); -static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); +static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); static void pf_memcxt_callback(void *arg); @@ -182,10 +185,12 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; - /* Should partitions be locked till transaction's end? */ + /* + * Should ResultPartsStorage do ExecCloseIndices and heap_close on + * finalization? + */ parts_storage->close_relations = close_relations; parts_storage->head_open_lock_mode = RowExclusiveLock; - parts_storage->heap_close_lock_mode = NoLock; /* Fetch PartRelationInfo for this partitioned relation */ parts_storage->prel = get_pathman_relation_info(parent_relid); @@ -214,13 +219,22 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) if (parts_storage->fini_rri_holder_cb) parts_storage->fini_rri_holder_cb(rri_holder, parts_storage); - /* Close partitions and indices */ + /* + * Close indices if ExecEndPlan won't do that for us (essentially, + * this is CopyFrom, which misses it; not a usual executor run). + * Otherwise, they are always closed automatically; in <= 11, relcache + * refs of rris managed heap_open/close on their own, and ExecEndPlan + * closed them directly. Since 9ddef3, relcache management + * of executor was centralized; now rri refs are copies of ones in + * estate->es_relations, which are closed in ExecEndPlan. + * So we push our rel there, and it is also automatically closed. 
+ */ if (parts_storage->close_relations) { ExecCloseIndices(rri_holder->result_rel_info); - + /* And relation itself */ heap_close(rri_holder->result_rel_info->ri_RelationDesc, - parts_storage->heap_close_lock_mode); + NoLock); } /* Free conversion-related stuff */ @@ -315,7 +329,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) ExecCheckRTPerms(list_make1(child_rte), true); /* Append RangeTblEntry to estate->es_range_table */ - child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte); + child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte, child_rel); /* Create ResultRelInfo for partition */ child_result_rel_info = makeNode(ResultRelInfo); @@ -355,7 +369,12 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - /* Generate tuple transformation map and some other stuff */ + /* + * Generate parent->child tuple transformation map. We need to + * convert tuples because e.g. parent's TupleDesc might have dropped + * columns which the child doesn't have at all because it was created + * after the drop. + */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); /* Default values */ @@ -760,21 +779,35 @@ partition_filter_exec(CustomScanState *node) /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { + Relation child_rel = rri->ri_RelationDesc; + + /* XXX: why did the old code materialize the tuple here? */ +#if PG_VERSION_NUM < 120000 HeapTuple htup_old, htup_new; - Relation child_rel = rri->ri_RelationDesc; htup_old = ExecMaterializeSlot(slot); htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); ExecClearTuple(slot); +#endif - /* Allocate new slot if needed */ + /* + * Allocate new slot if needed. + * For 12, it is sort of important to create BufferHeapTuple, + * though we will store a virtual one there. Otherwise, ModifyTable + * decides to copy it to the mt_scans slot, which has the parent's + * tupledesc. + */ if (!state->tup_convert_slot) - state->tup_convert_slot = MakeTupleTableSlotCompat(); + state->tup_convert_slot = MakeTupleTableSlotCompat(&TTSOpsBufferHeapTuple); /* TODO: why should we *always* set a new slot descriptor? */ ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); +#if PG_VERSION_NUM >= 120000 + slot = execute_attr_map_slot(rri_holder->tuple_map->attrMap, slot, state->tup_convert_slot); +#else slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); +#endif } return slot; @@ -1143,7 +1176,7 @@ fix_returning_list_mutator(Node *node, void *state) /* Append RangeTblEntry 'rte' to estate->es_range_table */ static Index -append_rte_to_estate(EState *estate, RangeTblEntry *rte) +append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) { estate_mod_data *emd_struct = fetch_estate_mod_data(estate); @@ -1156,6 +1189,28 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte) /* Update estate_mod_data */ emd_struct->estate_not_modified = false; + /* + * On PG >= 12, also add rte to es_range_table_array. This is horribly + * inefficient, yes. + * At least in 12 the es_range_table_array ptr is not saved anywhere in + * core, so it is safe to repalloc. 
+ */ +#if PG_VERSION_NUM >= 120000 + estate->es_range_table_size = list_length(estate->es_range_table); + estate->es_range_table_array = (RangeTblEntry **) + repalloc(estate->es_range_table_array, + estate->es_range_table_size * sizeof(RangeTblEntry *)); + estate->es_range_table_array[estate->es_range_table_size - 1] = rte; + + /* + * Also reallocate es_relations, because es_range_table_size defines its + * len. This also ensures ExecEndPlan will close the rel. + */ + estate->es_relations = (Relation *) + repalloc(estate->es_relations, estate->es_range_table_size * sizeof(Relation)); + estate->es_relations[estate->es_range_table_size - 1] = child_rel; +#endif + return list_length(estate->es_range_table); } diff --git a/src/partition_router.c b/src/partition_router.c index 82578c5d..8c3bac55 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -14,12 +14,23 @@ #include "partition_router.h" #include "compat/pg_compat.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#include "access/tableam.h" +#endif #include "access/xact.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" /* direct heap_delete, no-no */ +#endif #include "access/htup_details.h" #include "catalog/pg_class.h" #include "commands/trigger.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/makefuncs.h" /* make_ands_explicit */ +#include "optimizer/optimizer.h" +#endif #include "optimizer/clauses.h" #include "storage/bufmgr.h" #include "utils/guc.h" @@ -272,7 +283,8 @@ router_set_slot(PartitionRouterState *state, /* Don't forget to set saved_slot! */ state->yielded_slot = ExecInitExtraTupleSlotCompat(mt_state->ps.state, - slot->tts_tupleDescriptor); + slot->tts_tupleDescriptor, + &TTSOpsHeapTuple); ExecCopySlot(state->yielded_slot, slot); } @@ -394,8 +406,15 @@ router_lock_or_delete_tuple(PartitionRouterState *state, ExprContext *econtext = GetPerTupleExprContext(estate); ExprState *constraint = state->constraint; - HeapUpdateFailureData hufd; + /* Maintaining both >= 12 and earlier is quite horrible there, you know */ +#if PG_VERSION_NUM >= 120000 + TM_FailureData tmfd; + TM_Result result; +#else + HeapUpdateFailureData tmfd; HTSU_Result result; +#endif + EPQState *epqstate = &state->epqstate; LOCKMODE lockmode; @@ -422,9 +441,14 @@ router_lock_or_delete_tuple(PartitionRouterState *state, if (rri->ri_TrigDesc && rri->ri_TrigDesc->trig_update_before_row) { +#if PG_VERSION_NUM >= 120000 + if (!ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot)) + return NULL; +#else slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); if (TupIsNull(slot)) return NULL; +#endif } /* BEFORE ROW DELETE triggers */ @@ -439,7 +463,7 @@ router_lock_or_delete_tuple(PartitionRouterState *state, result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, - true /* wait for commit */, &tmfd, + true /* changing partition */); } else @@ -448,10 +472,11 @@ router_lock_or_delete_tuple(PartitionRouterState *state, Buffer buffer; tuple.t_self = *tupleid; + /* XXX: why do we ever need this? 
*/ result = heap_lock_tuple(rel, &tuple, estate->es_output_cid, lockmode, LockWaitBlock, - false, &buffer, &hufd); + false, &buffer, &tmfd); ReleaseBuffer(buffer); } @@ -459,8 +484,12 @@ router_lock_or_delete_tuple(PartitionRouterState *state, /* Check lock/delete status */ switch (result) { +#if PG_VERSION_NUM >= 120000 + case TM_SelfModified: +#else case HeapTupleSelfUpdated: - if (hufd.cmax != estate->es_output_cid) +#endif + if (tmfd.cmax != estate->es_output_cid) ereport(ERROR, (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), errmsg("tuple to be updated was already modified by an operation triggered by the current command"), @@ -469,20 +498,121 @@ router_lock_or_delete_tuple(PartitionRouterState *state, /* Already deleted by self; nothing to do */ return NULL; +#if PG_VERSION_NUM >= 120000 + case TM_Ok: +#else case HeapTupleMayBeUpdated: +#endif break; +#if PG_VERSION_NUM >= 120000 /* TM_Deleted/TM_Updated */ + case TM_Updated: + { + /* not sure this stuff is correct at all */ + TupleTableSlot *inputslot; + TupleTableSlot *epqslot; + + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + + /* + * Already know that we're going to need to do EPQ, so + * fetch tuple directly into the right slot. + */ + inputslot = EvalPlanQualSlot(epqstate, rel, rri->ri_RangeTableIndex); + + result = table_tuple_lock(rel, tupleid, + estate->es_snapshot, + inputslot, estate->es_output_cid, + LockTupleExclusive, LockWaitBlock, + TUPLE_LOCK_FLAG_FIND_LAST_VERSION, + &tmfd); + + switch (result) + { + case TM_Ok: + Assert(tmfd.traversed); + epqslot = EvalPlanQual(epqstate, + rel, + rri->ri_RangeTableIndex, + inputslot); + if (TupIsNull(epqslot)) + /* Tuple not passing quals anymore, exiting... */ + return NULL; + + /* just copied from below, ha */ + *tupleid = tmfd.ctid; + slot = epqslot; + goto recheck; + + case TM_SelfModified: + + /* + * This can be reached when following an update + * chain from a tuple updated by another session, + * reaching a tuple that was already updated in + * this transaction. If previously updated by this + * command, ignore the delete, otherwise error + * out. + * + * See also TM_SelfModified response to + * table_tuple_delete() above. + */ + if (tmfd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be deleted was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + return NULL; + + case TM_Deleted: + /* tuple already deleted; nothing to do */ + return NULL; + + default: + + /* + * TM_Invisible should be impossible because we're + * waiting for updated row versions, and would + * already have errored out if the first version + * is invisible. + * + * TM_Updated should be impossible, because we're + * locking the latest version via + * TUPLE_LOCK_FLAG_FIND_LAST_VERSION. 
+ */ + elog(ERROR, "unexpected table_tuple_lock status: %u", + result); + return NULL; + } + + Assert(false); + break; + } + + + case TM_Deleted: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent delete"))); + /* tuple already deleted; nothing to do */ + return NULL; + +#else case HeapTupleUpdated: if (IsolationUsesXactSnapshot()) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); - if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid)) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("tuple to be updated was already moved to another partition due to concurrent update"))); - if (!ItemPointerEquals(tupleid, &hufd.ctid)) + if (!ItemPointerEquals(tupleid, &tmfd.ctid)) { TupleTableSlot *epqslot; @@ -491,13 +621,13 @@ router_lock_or_delete_tuple(PartitionRouterState *state, rel, rri->ri_RangeTableIndex, LockTupleExclusive, - &hufd.ctid, - hufd.xmax); + &tmfd.ctid, + tmfd.xmax); if (!TupIsNull(epqslot)) { Assert(tupleid != NULL); - *tupleid = hufd.ctid; + *tupleid = tmfd.ctid; slot = epqslot; goto recheck; } @@ -505,8 +635,13 @@ router_lock_or_delete_tuple(PartitionRouterState *state, /* Tuple already deleted; nothing to do */ return NULL; +#endif /* TM_Deleted/TM_Updated */ +#if PG_VERSION_NUM >= 120000 + case TM_Invisible: +#else case HeapTupleInvisible: +#endif elog(ERROR, "attempted to lock invisible tuple"); break; diff --git a/src/pathman_workers.c b/src/pathman_workers.c index ae6d13b9..54d62e7f 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -839,7 +839,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = 0; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_cp_tasks, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cp_tasks, false); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_userid, "userid", REGROLEOID, -1, 0); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7764aa94..285a130f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -23,15 +23,23 @@ #include "runtime_merge_append.h" #include "postgres.h" +#include "access/genam.h" #include "access/htup_details.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" +#include "catalog/pg_collation.h" #include "catalog/indexing.h" #include "catalog/pg_type.h" #include "catalog/pg_extension.h" #include "commands/extension.h" #include "foreign/fdwapi.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#endif #include "optimizer/clauses.h" #include "optimizer/plancat.h" #include "optimizer/restrictinfo.h" @@ -384,7 +392,11 @@ get_pathman_schema(void) return InvalidOid; /* exit if pg_pathman does not exist */ ScanKeyInit(&entry[0], +#if PG_VERSION_NUM >= 120000 + Anum_pg_extension_oid, +#else ObjectIdAttributeNumber, +#endif BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ext_oid)); @@ -485,6 +497,26 @@ append_child_relation(PlannerInfo *root, child_rti = list_length(root->parse->rtable); root->simple_rte_array[child_rti] = child_rte; + /* Build an AppendRelInfo for this child */ + appinfo = makeNode(AppendRelInfo); + appinfo->parent_relid = parent_rti; + appinfo->child_relid = child_rti; + appinfo->parent_reloid = parent_rte->relid; + + /* Store table row types for wholerow references */ + 
appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; + appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; + + make_inh_translation_list(parent_relation, child_relation, child_rti, + &appinfo->translated_vars); + + /* Now append 'appinfo' to 'root->append_rel_list' */ + root->append_rel_list = lappend(root->append_rel_list, appinfo); + /* And to array in >= 11, it must be big enough */ +#if PG_VERSION_NUM >= 110000 + root->append_rel_array[child_rti] = appinfo; +#endif + /* Create RelOptInfo for this child (and make some estimates as well) */ child_rel = build_simple_rel_compat(root, child_rti, parent_rel); @@ -533,26 +565,6 @@ append_child_relation(PlannerInfo *root, } - /* Build an AppendRelInfo for this child */ - appinfo = makeNode(AppendRelInfo); - appinfo->parent_relid = parent_rti; - appinfo->child_relid = child_rti; - appinfo->parent_reloid = parent_rte->relid; - - /* Store table row types for wholerow references */ - appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; - appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; - - make_inh_translation_list(parent_relation, child_relation, child_rti, - &appinfo->translated_vars); - - /* Now append 'appinfo' to 'root->append_rel_list' */ - root->append_rel_list = lappend(root->append_rel_list, appinfo); - /* And to array in >= 11, it must be big enough */ -#if PG_VERSION_NUM >= 110000 - root->append_rel_array[child_rti] = appinfo; -#endif - /* Translate column privileges for this child */ if (parent_rte->relid != child_oid) { @@ -618,7 +630,11 @@ append_child_relation(PlannerInfo *root, * Restriction reduces to constant FALSE or constant NULL after * substitution, so this child need not be scanned. */ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else set_dummy_rel_pathlist(child_rel); +#endif } childquals = make_ands_implicit((Expr *) childqual); childquals = make_restrictinfos_from_actual_clauses(root, childquals); @@ -632,7 +648,11 @@ append_child_relation(PlannerInfo *root, * This child need not be scanned, so we can omit it from the * appendrel. */ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else set_dummy_rel_pathlist(child_rel); +#endif } /* @@ -1065,9 +1085,14 @@ handle_const(const Const *c, } /* Else use the Const's value */ else value = c->constvalue; - - /* Calculate 32-bit hash of 'value' and corresponding index */ - hash = OidFunctionCall1(prel->hash_proc, value); + /* + * Calculate 32-bit hash of 'value' and corresponding index. + * Since 12, hashtext requires a valid collation. We have never + * supported collations here, so passing the database default will do. 
+ */ + hash = OidFunctionCall1Coll(prel->hash_proc, + DEFAULT_COLLATION_OID, + value); idx = hash_to_part_index(DatumGetInt32(hash), PrelChildrenCount(prel)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c302089e..ebf80861 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -19,6 +19,12 @@ #include "utils.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/table.h" +#include "access/tableam.h" +#endif #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" @@ -82,7 +88,11 @@ PG_FUNCTION_INFO_V1( pathman_version ); typedef struct { Relation pathman_config; +#if PG_VERSION_NUM >= 120000 + TableScanDesc pathman_config_scan; +#else HeapScanDesc pathman_config_scan; +#endif Snapshot snapshot; PartRelationInfo *current_prel; /* selected PartRelationInfo */ @@ -202,7 +212,8 @@ get_base_type_pl(PG_FUNCTION_ARGS) } /* - * Return tablespace name of a specified relation. + * Return tablespace name of a specified relation which must not be + * natively partitioned. */ Datum get_tablespace_pl(PG_FUNCTION_ARGS) @@ -216,7 +227,7 @@ get_tablespace_pl(PG_FUNCTION_ARGS) /* If tablespace id is InvalidOid then use the default tablespace */ if (!OidIsValid(tablespace_id)) { - tablespace_id = GetDefaultTablespace(get_rel_persistence(relid)); + tablespace_id = GetDefaultTablespaceCompat(get_rel_persistence(relid), false); /* If tablespace is still invalid then use database's default */ if (!OidIsValid(tablespace_id)) @@ -274,7 +285,7 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) usercxt->current_item = 0; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_cache_stats, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cache_stats, false); TupleDescInitEntry(tupdesc, Anum_pathman_cs_context, "context", TEXTOID, -1, 0); @@ -381,13 +392,18 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt->pathman_config = heap_open(get_pathman_config_relid(false), AccessShareLock); usercxt->snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + usercxt->pathman_config_scan = table_beginscan(usercxt->pathman_config, + usercxt->snapshot, 0, NULL); +#else usercxt->pathman_config_scan = heap_beginscan(usercxt->pathman_config, usercxt->snapshot, 0, NULL); +#endif usercxt->current_prel = NULL; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_partition_list, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_partition_list, false); TupleDescInitEntry(tupdesc, Anum_pathman_pl_parent, "parent", REGCLASSOID, -1, 0); @@ -555,7 +571,11 @@ show_partition_list_internal(PG_FUNCTION_ARGS) } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(usercxt->pathman_config_scan); +#else heap_endscan(usercxt->pathman_config_scan); +#endif UnregisterSnapshot(usercxt->snapshot); heap_close(usercxt->pathman_config, AccessShareLock); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 0d3ca9d7..27361dd3 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -15,6 +15,9 @@ #include "utils.h" #include "xact_handling.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/transam.h" #include "access/xact.h" #include "catalog/heap.h" @@ -26,6 +29,9 @@ #include "parser/parse_relation.h" #include "parser/parse_expr.h" #include "utils/array.h" +#if PG_VERSION_NUM >= 120000 +#include "utils/float.h" +#endif #include "utils/builtins.h" #include 
"utils/lsyscache.h" #include "utils/numeric.h" @@ -1084,6 +1090,8 @@ build_range_condition(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL")));; + /* lock the partition */ + LockRelationOid(partition_relid, ShareUpdateExclusiveLock); min = PG_ARGISNULL(2) ? MakeBoundInf(MINUS_INFINITY) : MakeBound(PG_GETARG_DATUM(2)); @@ -1329,7 +1337,7 @@ deparse_constraint(Oid relid, Node *expr) /* Initialize parse state */ pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); + rte = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); addRTEtoQuery(pstate, rte, true, true, true); /* Transform constraint into executable expression (i.e. cook it) */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 4766ded1..2c14959e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -20,6 +20,9 @@ #include "relation_info.h" #include "rewrite/rewriteManip.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/htup_details.h" #include "foreign/fdwapi.h" #include "miscadmin.h" diff --git a/src/relation_info.c b/src/relation_info.c index d24af71d..0c79b504 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -16,6 +16,10 @@ #include "xact_handling.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/genam.h" +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/indexing.h" @@ -24,8 +28,12 @@ #include "catalog/pg_type.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/clauses.h" #include "optimizer/var.h" +#endif #include "parser/analyze.h" #include "parser/parser.h" #include "storage/lmgr.h" diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 836a1fdd..92ae3e60 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -19,10 +19,14 @@ #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "nodes/plannodes.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/cost.h" +#include "optimizer/var.h" +#endif #include "optimizer/planmain.h" #include "optimizer/tlist.h" -#include "optimizer/var.h" #include "utils/builtins.h" #include "utils/guc.h" #include "utils/lsyscache.h" diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 9683914b..2b5a5956 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -18,6 +18,10 @@ #include "partition_filter.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" +#include "access/table.h" +#endif #include "access/sysattr.h" #include "access/xact.h" #include "catalog/namespace.h" @@ -501,7 +505,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, estate->es_result_relations = parent_rri; estate->es_num_result_relations = 1; estate->es_result_relation_info = parent_rri; +#if PG_VERSION_NUM >= 120000 + ExecInitRangeTable(estate, range_table); +#else estate->es_range_table = range_table; +#endif /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, @@ -513,9 +521,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, RPS_RRI_CB(finish_rri_for_copy, NULL)); /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlotCompat(estate, NULL); + myslot = 
ExecInitExtraTupleSlotCompat(estate, NULL, &TTSOpsHeapTuple); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc); +#if PG_VERSION_NUM < 120000 + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc, nothing_here); +#endif /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); @@ -535,7 +545,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot; bool skip_tuple = false; +#if PG_VERSION_NUM < 120000 Oid tuple_oid = InvalidOid; +#endif ExprContext *econtext = GetPerTupleExprContext(estate); ResultRelInfoHolder *rri_holder; @@ -548,19 +560,25 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Switch into per tuple memory context */ MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) + if (!NextCopyFromCompat(cstate, econtext, values, nulls, &tuple_oid)) break; /* We can form the input tuple */ tuple = heap_form_tuple(tupDesc, values, nulls); +#if PG_VERSION_NUM < 120000 if (tuple_oid != InvalidOid) HeapTupleSetOid(tuple, tuple_oid); +#endif /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; ExecSetSlotDescriptor(slot, tupDesc); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif /* Search for a matching partition */ rri_holder = select_partition_for_insert(&parts_storage, slot); @@ -581,13 +599,21 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, HeapTuple tuple_old; tuple_old = tuple; +#if PG_VERSION_NUM >= 120000 + tuple = execute_attr_map_tuple(tuple, rri_holder->tuple_map); +#else tuple = do_convert_tuple(tuple, rri_holder->tuple_map); +#endif heap_freetuple(tuple_old); } /* Now we can set proper tuple descriptor according to child relation */ ExecSetSlotDescriptor(slot, RelationGetDescr(child_rri->ri_RelationDesc)); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif /* Triggers and stuff need to be invoked in query context. */ MemoryContextSwitchTo(query_mcxt); @@ -596,12 +622,21 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (child_rri->ri_TrigDesc && child_rri->ri_TrigDesc->trig_insert_before_row) { +#if PG_VERSION_NUM >= 120000 + if (!ExecBRInsertTriggers(estate, child_rri, slot)) + skip_tuple = true; + else /* trigger might have changed tuple */ + tuple = ExecFetchSlotHeapTuple(slot, false, NULL); +#else slot = ExecBRInsertTriggers(estate, child_rri, slot); if (slot == NULL) /* "do nothing" */ skip_tuple = true; else /* trigger might have changed tuple */ + { tuple = ExecMaterializeSlot(slot); + } +#endif } /* Proceed if we still have a tuple */ @@ -618,11 +653,16 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { /* OK, now store the tuple... */ simple_heap_insert(child_rri->ri_RelationDesc, tuple); +#if PG_VERSION_NUM >= 120000 /* since 12, tid lives directly in slot */ + ItemPointerCopy(&tuple->t_self, &slot->tts_tid); + /* and we must stamp tableOid as we go around table_tuple_insert */ + slot->tts_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); +#endif /* ... 
and create index entries for it */ if (child_rri->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), - estate, false, NULL, NIL); + recheckIndexes = ExecInsertIndexTuplesCompat(slot, &(tuple->t_self), + estate, false, NULL, NIL); } #ifdef PG_SHARDMAN /* Handle foreign tables */ @@ -635,8 +675,13 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, #endif /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ +#if PG_VERSION_NUM >= 120000 + ExecARInsertTriggersCompat(estate, child_rri, slot, + recheckIndexes, NULL); +#else ExecARInsertTriggersCompat(estate, child_rri, tuple, recheckIndexes, NULL); +#endif list_free(recheckIndexes); @@ -798,7 +843,7 @@ PathmanRenameSequence(Oid parent_relid, /* parent Oid */ return; /* Finally, rename auto naming sequence */ - RenameRelationInternal(seq_relid, new_seq_name, false); + RenameRelationInternalCompat(seq_relid, new_seq_name, false, false); pfree(seq_nsp_name); pfree(old_seq_name); From e4bb77b683a338f0ae1c3dec6c99f861a8619f84 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 18 Nov 2019 22:50:19 +0300 Subject: [PATCH 431/528] Pgpro-specific part of porting to 12. --- expected/pathman_hashjoin.out | 5 +++ expected/pathman_hashjoin_1.out | 5 +++ expected/pathman_hashjoin_2.out | 5 +++ expected/pathman_hashjoin_3.out | 70 ++++++++++++++++++++++++++++++++ expected/pathman_mergejoin.out | 5 +++ expected/pathman_mergejoin_1.out | 5 +++ expected/pathman_mergejoin_2.out | 5 +++ expected/pathman_mergejoin_3.out | 68 +++++++++++++++++++++++++++++++ sql/pathman_hashjoin.sql | 6 +++ sql/pathman_mergejoin.sql | 6 +++ src/include/compat/pg_compat.h | 6 +-- 11 files changed, 183 insertions(+), 3 deletions(-) create mode 100644 expected/pathman_hashjoin_3.out create mode 100644 expected/pathman_mergejoin_3.out diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out index 71ea1085..1e5b2783 100644 --- a/expected/pathman_hashjoin.out +++ b/expected/pathman_hashjoin.out @@ -1,3 +1,8 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out index 8e0007d4..af569764 100644 --- a/expected/pathman_hashjoin_1.out +++ b/expected/pathman_hashjoin_1.out @@ -1,3 +1,8 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out index d0cba65d..c77146d1 100644 --- a/expected/pathman_hashjoin_2.out +++ b/expected/pathman_hashjoin_2.out @@ -1,3 +1,8 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out new file mode 100644 index 00000000..93613919 --- /dev/null +++ b/expected/pathman_hashjoin_3.out @@ -0,0 +1,70 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out index ff2ae5bb..1bd9da6f 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -1,3 +1,8 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out index de87f09b..5b903dc1 100644 --- a/expected/pathman_mergejoin_1.out +++ b/expected/pathman_mergejoin_1.out @@ -1,3 +1,8 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out index acff2247..0168d556 100644 --- a/expected/pathman_mergejoin_2.out +++ b/expected/pathman_mergejoin_2.out @@ -1,3 +1,8 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out new file mode 100644 index 00000000..3d4a441c --- /dev/null +++ b/expected/pathman_mergejoin_3.out @@ -0,0 +1,68 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index 411e0a7f..8a08569f 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -1,3 +1,9 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 9b0b95b1..e85cc934 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -1,3 +1,9 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 26931fd9..c915503c 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -246,10 +246,10 @@ create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ (parallel_workers), false, NIL, -1) #else -/* TODO pgpro version */ +/* TODO pgpro version? Looks like something is not ported yet */ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ - create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ - (parallel_workers), false, NIL, -1, false, NIL) + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false) #endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 110000 From 7954c34976047b878c1a28005d7b07c7011cbfe8 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 18 Nov 2019 22:57:05 +0300 Subject: [PATCH 432/528] Add 12 to travis. --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 946eb606..ff2fac20 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ notifications: on_failure: always env: + - PG_VERSION=12 LEVEL=hardcore + - PG_VERSION=12 - PG_VERSION=11 LEVEL=hardcore - PG_VERSION=11 - PG_VERSION=10 LEVEL=hardcore From a30c0a516cb7f2657c9d8247798887127773e1f6 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 19 Nov 2019 18:13:43 +0300 Subject: [PATCH 433/528] Not sure why I do that, but fix cmocka tests. 
--- tests/cmocka/Makefile | 1 + tests/cmocka/missing_basic.c | 1 + tests/cmocka/missing_bitmapset.c | 1 + tests/cmocka/missing_list.c | 1 + tests/cmocka/missing_stringinfo.c | 1 + 5 files changed, 5 insertions(+) diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index e31e6d95..5216a467 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,6 +8,7 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) +CFLAGS += -D_GNU_SOURCE LDFLAGS += -lcmocka TEST_BIN = rangeset_tests diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c index d6c3808e..7524abb5 100644 --- a/tests/cmocka/missing_basic.c +++ b/tests/cmocka/missing_basic.c @@ -1,6 +1,7 @@ #include #include "postgres.h" +#include "undef_printf.h" void * diff --git a/tests/cmocka/missing_bitmapset.c b/tests/cmocka/missing_bitmapset.c index 7e986d5a..84e7e771 100644 --- a/tests/cmocka/missing_bitmapset.c +++ b/tests/cmocka/missing_bitmapset.c @@ -1,4 +1,5 @@ #include "postgres.h" +#include "undef_printf.h" #include "nodes/bitmapset.h" diff --git a/tests/cmocka/missing_list.c b/tests/cmocka/missing_list.c index 9c07bc10..5ddce8a8 100644 --- a/tests/cmocka/missing_list.c +++ b/tests/cmocka/missing_list.c @@ -13,6 +13,7 @@ * *------------------------------------------------------------------------- */ +#define _GNU_SOURCE #include "postgres.h" #include "nodes/pg_list.h" diff --git a/tests/cmocka/missing_stringinfo.c b/tests/cmocka/missing_stringinfo.c index 8596bf7e..edf4d8a4 100644 --- a/tests/cmocka/missing_stringinfo.c +++ b/tests/cmocka/missing_stringinfo.c @@ -14,6 +14,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" +#include "undef_printf.h" #include "lib/stringinfo.h" #include "utils/memutils.h" From 6f51eb4f7b2414ae0307037ae94d6327c1e97385 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 19 Nov 2019 18:26:21 +0300 Subject: [PATCH 434/528] Forgot undef file. --- tests/cmocka/undef_printf.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 tests/cmocka/undef_printf.h diff --git a/tests/cmocka/undef_printf.h b/tests/cmocka/undef_printf.h new file mode 100644 index 00000000..63ba700c --- /dev/null +++ b/tests/cmocka/undef_printf.h @@ -0,0 +1,24 @@ +#ifdef vsnprintf +#undef vsnprintf +#endif +#ifdef snprintf +#undef snprintf +#endif +#ifdef vsprintf +#undef vsprintf +#endif +#ifdef sprintf +#undef sprintf +#endif +#ifdef vfprintf +#undef vfprintf +#endif +#ifdef fprintf +#undef fprintf +#endif +#ifdef vprintf +#undef vprintf +#endif +#ifdef printf +#undef printf +#endif From 44b8962b80d8917d52fbf7d34f9ab7c8384a2f30 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 22 Nov 2019 18:49:27 +0300 Subject: [PATCH 435/528] Bump 1.5.10 lib version. 
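The version string has to move in three places at once: META.json, the
pathman_calamity expected outputs, and CURRENT_LIB_VERSION in
src/include/init.h. The latter is what reaches SQL; a sketch of the plumbing,
assuming the usual V1 calling convention (the real function lives in
src/pl_funcs.c):

    #include "postgres.h"
    #include "fmgr.h"
    #include "init.h"	/* CURRENT_LIB_VERSION */

    PG_FUNCTION_INFO_V1(pathman_version);

    /* hand the compiled-in version string back to SELECT pathman_version() */
    Datum
    pathman_version(PG_FUNCTION_ARGS)
    {
    	PG_RETURN_CSTRING(CURRENT_LIB_VERSION);
    }

This is also why pathman_calamity.out must change whenever init.h is bumped.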
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- expected/pathman_calamity_1.out | 2 +- src/include/init.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/META.json b/META.json index cd55fcb4..1201812c 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.9", + "version": "1.5.10", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.9", + "version": "1.5.10", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index c258b5cc..759d7dca 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -20,7 +20,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.9 + 1.5.10 (1 row) set client_min_messages = NOTICE; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index ee422784..e434f2eb 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -20,7 +20,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.9 + 1.5.10 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 15efae16..931528ef 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.9" +#define CURRENT_LIB_VERSION "1.5.10" void *pathman_cache_search_relid(HTAB *cache_table, From 30d07062dc038b13f576b9a6644621082c4837cc Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 26 Nov 2019 19:42:57 +0300 Subject: [PATCH 436/528] Create lateral test output file for pgpro. 
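A note on the version-bump routine above: the version string lives in the single CURRENT_LIB_VERSION macro, but it is pinned in the regression output via the pathman_version() SQL function, which is why every bump must also touch each pathman_calamity*.out variant. The C side is presumably no more than this (a sketch modeled on the init.h macro shown above, not a verbatim copy of pl_funcs.c):

#include "postgres.h"
#include "fmgr.h"

#define CURRENT_LIB_VERSION "1.5.10"	/* mirrors src/include/init.h */

PG_FUNCTION_INFO_V1(pathman_version);

/* Backs SELECT pathman_version(), the value checked by pathman_calamity*.out */
Datum
pathman_version(PG_FUNCTION_ARGS)
{
	PG_RETURN_CSTRING(CURRENT_LIB_VERSION);
}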
--- expected/pathman_lateral.out | 2 + expected/pathman_lateral_1.out | 121 +++++++++++++++++++++++++++++++++ sql/pathman_lateral.sql | 3 + 3 files changed, 126 insertions(+) create mode 100644 expected/pathman_lateral_1.out diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index e5148664..9bff1e57 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -1,3 +1,5 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_1.out b/expected/pathman_lateral_1.out new file mode 100644 index 00000000..1dc67fe2 --- /dev/null +++ b/expected/pathman_lateral_1.out @@ -0,0 +1,121 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t + -> Seq 
Scan on data_1 t_1 + -> Seq Scan on data_2 t_2 + -> Seq Scan on data_3 t_3 + -> Seq Scan on data_4 t_4 + -> Seq Scan on data_5 t_5 + -> Seq Scan on data_6 t_6 + -> Seq Scan on data_7 t_7 + -> Seq Scan on data_8 t_8 + -> Seq Scan on data_9 t_9 + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP SCHEMA test_lateral CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql index 49dee604..645e5f93 100644 --- a/sql/pathman_lateral.sql +++ b/sql/pathman_lateral.sql @@ -1,3 +1,6 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out + \set VERBOSITY terse SET search_path = 'public'; From f463e0e1b6cec2e1913760d637f8652fb6949048 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 3 Dec 2019 15:56:46 +0300 Subject: [PATCH 437/528] Add some quotes to SPI call in partition creation. --- src/partition_creation.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index e162e99e..3e578e70 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -605,15 +605,15 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* Construct call to create_single_range_partition() */ create_sql = psprintf( "select %s.create_single_range_partition('%s.%s', '%s'::%s, '%s'::%s, '%s.%s')", - get_namespace_name(get_pathman_schema()), - parent_nsp_name, - get_rel_name(parent_relid), + quote_identifier(get_namespace_name(get_pathman_schema())), + quote_identifier(parent_nsp_name), + quote_identifier(get_rel_name(parent_relid)), IsInfinite(&bounds[0]) ? "NULL" : datum_to_cstring(bounds[0].value, range_bound_type), typname, IsInfinite(&bounds[1]) ? "NULL" : datum_to_cstring(bounds[1].value, range_bound_type), typname, - parent_nsp_name, - partition_name + quote_identifier(parent_nsp_name), + quote_identifier(partition_name) ); /* ...and call it. */ From fa068e7a5cd04c76bb38dc2e073c6e01a14791ff Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 16 Dec 2019 17:06:37 +0300 Subject: [PATCH 438/528] Silence a couple of windows warnings. 
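An aside on the quoting patch above: quote_identifier() (declared in utils/builtins.h, implemented in PostgreSQL's ruleutils.c) returns its argument unchanged when it is already a safe lowercase identifier, and otherwise returns a double-quoted copy with embedded quotes doubled. That is what keeps the psprintf()-assembled SPI query parseable when schema or table names contain uppercase letters, spaces or quotes. A small sketch of the behaviour relied upon (quoting_demo is hypothetical):

#include "postgres.h"
#include "utils/builtins.h"		/* quote_identifier() */

static void
quoting_demo(void)
{
	const char *plain = quote_identifier("parent_tbl");	/* -> parent_tbl  (unchanged) */
	const char *mixed = quote_identifier("My Schema");	/* -> "My Schema" (quoted)    */
	const char *evil  = quote_identifier("we\"ird");	/* -> "we""ird"   (escaped)   */

	elog(DEBUG1, "%s %s %s", plain, mixed, evil);
}

Without the quoting, such names would yield a generated query that either fails to parse or resolves to the wrong relation.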
---
 src/include/init.h     | 2 ++
 src/partition_router.c | 1 +
 2 files changed, 3 insertions(+)

diff --git a/src/include/init.h b/src/include/init.h
index 931528ef..2e7b49d9 100644
--- a/src/include/init.h
+++ b/src/include/init.h
@@ -92,6 +92,8 @@ simplify_mcxt_name(MemoryContext mcxt)
 		return PATHMAN_BOUNDS_CACHE;
 	else
 		elog(ERROR, "unknown memory context");
+
+	return NULL; /* keep compiler quiet */
 }
 
diff --git a/src/partition_router.c b/src/partition_router.c
index 8c3bac55..b602347b 100644
--- a/src/partition_router.c
+++ b/src/partition_router.c
@@ -390,6 +390,7 @@ router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot)
 		elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables");
 	else
 		elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind);
+	return *(ItemPointer) NULL; /* keep compiler quiet, lol */
 }
 
 /* This is a heavily modified copy of ExecDelete from nodeModifyTable.c */

From 8f68671ad22f175a0b232d40695cef7fe6fb77d3 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Thu, 2 Apr 2020 21:00:26 +0300
Subject: [PATCH 439/528] [PGPRO-3725] zero out garbage in append_rel_array if
 we allocate it.

Since 1d9056f563f3 (which uses the existence of an AppendRelInfo* to mark
a rel as a child) in 11.7, this led to (known) random segfaults.

---
 src/hooks.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/hooks.c b/src/hooks.c
index 12c053b2..ca1db9be 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -498,9 +498,13 @@ pathman_rel_pathlist_hook(PlannerInfo *root,
 			   irange_len * sizeof(RangeTblEntry *));
 
 #if PG_VERSION_NUM >= 110000
-		/* Make sure append_rel_array is wide enough */
+		/*
+		 * Make sure append_rel_array is wide enough; if it hasn't been
+		 * allocated previously, take care to zero out the [0; current_len) part.
+		 */
 		if (root->append_rel_array == NULL)
-			root->append_rel_array = (AppendRelInfo **) palloc0(0);
+			root->append_rel_array = (AppendRelInfo **)
+				palloc0(current_len * sizeof(AppendRelInfo *));
 
 		root->append_rel_array = (AppendRelInfo **)
 			repalloc(root->append_rel_array, new_len * sizeof(AppendRelInfo *));

From 7258169d0514779ea0bcbe11b339e037731b2d19 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Tue, 7 Apr 2020 22:32:05 +0300
Subject: [PATCH 440/528] Bump 1.5.11 lib version.
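An aside on the PGPRO-3725 fix in the previous patch: repalloc(), like realloc(), preserves the existing contents but leaves the newly grown tail uninitialized, so an array whose NULL-ness carries meaning has to be zeroed explicitly. A distilled sketch of the corrected allocation path (grow_append_rel_array is hypothetical; the variable names follow the hooks.c hunk):

#include "postgres.h"
#include "nodes/pathnodes.h"	/* AppendRelInfo; nodes/relation.h before PG 12 */

static AppendRelInfo **
grow_append_rel_array(AppendRelInfo **array, int current_len, int new_len)
{
	/*
	 * The old code did palloc0(0) here, so after the repalloc below every
	 * slot was garbage; since 1d9056f563f3 (PG 11.7) a non-NULL slot means
	 * "this rel is a child", hence the random segfaults.
	 */
	if (array == NULL)
		array = (AppendRelInfo **)
			palloc0(current_len * sizeof(AppendRelInfo *));

	/* repalloc() keeps [0; current_len) intact but does NOT zero the tail */
	array = (AppendRelInfo **)
		repalloc(array, new_len * sizeof(AppendRelInfo *));

	/* the caller fills [current_len; new_len) right away, so that part is fine */
	return array;
}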
---
 META.json                       | 4 ++--
 expected/pathman_calamity.out   | 2 +-
 expected/pathman_calamity_1.out | 2 +-
 src/include/init.h              | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/META.json b/META.json
index 1201812c..6bd1607d 100644
--- a/META.json
+++ b/META.json
@@ -2,7 +2,7 @@
    "name": "pg_pathman",
    "abstract": "Fast partitioning tool for PostgreSQL",
    "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.",
-   "version": "1.5.10",
+   "version": "1.5.11",
    "maintainer": [
       "Arseny Sher "
    ],
@@ -22,7 +22,7 @@
       "pg_pathman": {
          "file": "pg_pathman--1.5.sql",
          "docfile": "README.md",
-         "version": "1.5.10",
+         "version": "1.5.11",
          "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher"
       }
    },
diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out
index 759d7dca..0943bc5c 100644
--- a/expected/pathman_calamity.out
+++ b/expected/pathman_calamity.out
@@ -20,7 +20,7 @@ SELECT debug_capture();
 SELECT pathman_version();
  pathman_version
 -----------------
- 1.5.10
+ 1.5.11
 (1 row)
 
 set client_min_messages = NOTICE;
diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out
index e434f2eb..b2e192e1 100644
--- a/expected/pathman_calamity_1.out
+++ b/expected/pathman_calamity_1.out
@@ -20,7 +20,7 @@ SELECT debug_capture();
 SELECT pathman_version();
  pathman_version
 -----------------
- 1.5.10
+ 1.5.11
 (1 row)
 
 set client_min_messages = NOTICE;
diff --git a/src/include/init.h b/src/include/init.h
index 2e7b49d9..f7f3df59 100644
--- a/src/include/init.h
+++ b/src/include/init.h
@@ -160,7 +160,7 @@ simplify_mcxt_name(MemoryContext mcxt)
 #define LOWEST_COMPATIBLE_FRONT "1.5.0"
 
 /* Current version of native C library */
-#define CURRENT_LIB_VERSION "1.5.10"
+#define CURRENT_LIB_VERSION "1.5.11"
 
 
 void *pathman_cache_search_relid(HTAB *cache_table,

From cbbf906760ccf676ae9ed0af810337e36a26dde6 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Wed, 8 Apr 2020 20:35:59 +0300
Subject: [PATCH 441/528] Fix func signature change in minor pgpro releases,
 arrgh.

---
 src/include/compat/pg_compat.h  | 14 +++++++++++++-
 src/nodes_common.c              |  2 +-
 src/partition_filter.c          |  2 +-
 src/planner_tree_modification.c | 16 ++++++++--------
 4 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h
index c915503c..032840c5 100644
--- a/src/include/compat/pg_compat.h
+++ b/src/include/compat/pg_compat.h
@@ -992,7 +992,19 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc,
 	AddRelationNewConstraints((rel), (newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal))
 #endif
 
-
+/*
+ * [PGPRO-3725] Since 11.7 and 12.1 in pgpro standard and ee PGPRO-2843
+ * appeared, changing the signature, wow. It is not present in pgpro 1c
+ * though; PGPRO_VERSION_STR is defined in std and ee but not in 1c, so it is
+ * hackishly used for distinguishing them.
+ */
+#if defined(PGPRO_VERSION_STR) && (PG_VERSION_NUM >= 110006)
+#define expression_tree_mutator_compat(node, mutator, context) \
+	expression_tree_mutator((node), (mutator), (context), 0)
+#else
+#define expression_tree_mutator_compat(node, mutator, context) \
+	expression_tree_mutator((node), (mutator), (context))
+#endif
 
 /*
  * -------------
diff --git a/src/nodes_common.c b/src/nodes_common.c
index 8adf81dd..cf273fe6 100644
--- a/src/nodes_common.c
+++ b/src/nodes_common.c
@@ -373,7 +373,7 @@ canonicalize_custom_exprs_mutator(Node *node, void *cxt)
 		return (Node *) var;
 	}
 
-	return expression_tree_mutator(node, canonicalize_custom_exprs_mutator, NULL);
+	return expression_tree_mutator_compat(node, canonicalize_custom_exprs_mutator, NULL);
 }
 
 static List *
diff --git a/src/partition_filter.c b/src/partition_filter.c
index a923c650..f6cb5b60 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -1164,7 +1164,7 @@ fix_returning_list_mutator(Node *node, void *state)
 		return (Node *) var;
 	}
 
-	return expression_tree_mutator(node, fix_returning_list_mutator, state);
+	return expression_tree_mutator_compat(node, fix_returning_list_mutator, state);
 }
 
 
diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c
index 2c14959e..6fc55c7b 100644
--- a/src/planner_tree_modification.c
+++ b/src/planner_tree_modification.c
@@ -709,15 +709,15 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context)
 		SubLink	   *sl = (SubLink *) node;
 
 		/* Examine its expression */
-		sl->testexpr = expression_tree_mutator(sl->testexpr,
-											   adjust_appendrel_varnos,
-											   context);
+		sl->testexpr = expression_tree_mutator_compat(sl->testexpr,
+													  adjust_appendrel_varnos,
+													  context);
 		return (Node *) sl;
 	}
 
-	return expression_tree_mutator(node,
-								   adjust_appendrel_varnos,
-								   context);
+	return expression_tree_mutator_compat(node,
+										  adjust_appendrel_varnos,
+										  context);
 }
 
 
@@ -1063,8 +1063,8 @@ eval_extern_params_mutator(Node *node, ParamListInfo params)
 		}
 	}
 
-	return expression_tree_mutator(node, eval_extern_params_mutator,
-								   (void *) params);
+	return expression_tree_mutator_compat(node, eval_extern_params_mutator,
+										  (void *) params);
 }
 
 /* Check whether Var translation list is trivial (no shuffle) */

From bf0a84ca516494e4459df3924108caf99edec734 Mon Sep 17 00:00:00 2001
From: Arseny Sher
Date: Tue, 14 Apr 2020 13:48:24 +0300
Subject: [PATCH 442/528] Use more specific macro for previous cbbf906760ccf6.

---
 src/include/compat/pg_compat.h | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h
index 032840c5..c1805f80 100644
--- a/src/include/compat/pg_compat.h
+++ b/src/include/compat/pg_compat.h
@@ -26,6 +26,7 @@
 #include "commands/trigger.h"
 #include "executor/executor.h"
 #include "nodes/memnodes.h"
+#include "nodes/nodeFuncs.h"
 #if PG_VERSION_NUM >= 120000
 #include "nodes/pathnodes.h"
 #else
@@ -994,11 +995,11 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc,
 
 /*
  * [PGPRO-3725] Since 11.7 and 12.1 in pgpro standard and ee PGPRO-2843
- * appeared, changing the signature, wow. It is not present in pgpro 1c
- * though; PGPRO_VERSION_STR is defined in std and ee but not in 1c, so it is
- * hackishly used for distinguishing them.
+ * appeared, changing the signature, wow. There is no numeric pgpro edition
+ * macro (and never will be, for old versions), so distinguish via macro added
+ * by the commit.
*/ -#if defined(PGPRO_VERSION_STR) && (PG_VERSION_NUM >= 110006) +#ifdef QTW_DONT_COPY_DEFAULT #define expression_tree_mutator_compat(node, mutator, context) \ expression_tree_mutator((node), (mutator), (context), 0) #else From 4de7727d11a77d0e6d708c4298a1393f738e381a Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 8 Sep 2020 14:26:58 +0300 Subject: [PATCH 443/528] Adapt to 3737965249c fix of CREATE TABLE LIKE with inheritance. Since it LIKE must be handled after DefineRelation -- do it so. (added ifdef won't work for current dev branches as PG_VERSION_NUM is not bumped yet, but will do its job after releases) --- src/partition_creation.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index 3e578e70..cd2a7b82 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -854,6 +854,37 @@ create_single_partition_internal(Oid parent_relid, { elog(ERROR, "FDW partition creation is not implemented yet"); } + /* + * 3737965249cd fix (since 12.5, 11.10, etc) reworked LIKE handling + * to process it after DefineRelation. + */ +#if (PG_VERSION_NUM >= 130000) || \ + ((PG_VERSION_NUM < 130000) && (PG_VERSION_NUM >= 120005)) || \ + ((PG_VERSION_NUM < 120000) && (PG_VERSION_NUM >= 110010)) || \ + ((PG_VERSION_NUM < 110000) && (PG_VERSION_NUM >= 100015)) || \ + ((PG_VERSION_NUM < 100000) && (PG_VERSION_NUM >= 90620)) || \ + ((PG_VERSION_NUM < 90600) && (PG_VERSION_NUM >= 90524)) + else if (IsA(cur_stmt, TableLikeClause)) + { + /* + * Do delayed processing of LIKE options. This + * will result in additional sub-statements for us + * to process. We can just tack those onto the + * to-do list. + */ + TableLikeClause *like = (TableLikeClause *) cur_stmt; + RangeVar *rv = create_stmt.relation; + List *morestmts; + + morestmts = expandTableLikeClause(rv, like); + create_stmts = list_concat(create_stmts, morestmts); + + /* + * We don't need a CCI now + */ + continue; + } +#endif else { /* From 34f4698df1c637a1e8e6a8afd12bcf175e1880de Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 22 Oct 2020 22:18:29 -0400 Subject: [PATCH 444/528] use python3 instead of python in tests/python/Makefile and tests/update/check_update.py --- tests/python/Makefile | 4 ++-- tests/update/check_update.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/python/Makefile b/tests/python/Makefile index f8a71e41..fed17cf3 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,6 +1,6 @@ partitioning_tests: ifneq ($(CASE),) - python partitioning_test.py Tests.$(CASE) + python3 partitioning_test.py Tests.$(CASE) else - python partitioning_test.py + python3 partitioning_test.py endif diff --git a/tests/update/check_update.py b/tests/update/check_update.py index 9ac4db62..4bd740f6 100755 --- a/tests/update/check_update.py +++ b/tests/update/check_update.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 #coding: utf-8 import shutil From 347f8dc423fd8528119961de7da7ba159bbdac14 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 23 Oct 2020 09:21:49 +0300 Subject: [PATCH 445/528] PostgreSQL 13 compatibility. Also try to log tail of PG logs on CI in a brave hope of catching troubles there. Currently travis job on 10 rarely fails at test_concurrent_updates, but we've failed to reproduce the issue locally. 
Note that test_concurrent_updates actually doesn't perform anything useful,
as testgres nodes aren't picklable due to weird logging implemented in a
separate thread, but nobody cared to check the apply_async result, so this
has gone unnoticed for a long time.

---
 .travis.yml                        |    2 +
 Makefile                           |    4 +-
 README.md                          |    4 +-
 expected/pathman_basic.out         |    3 +
 expected/pathman_basic_1.out       |    3 +
 expected/pathman_basic_2.out       | 1819 ++++++++++++++++++++++++++++
 expected/pathman_calamity.out      |    3 +
 expected/pathman_calamity_1.out    |    3 +
 expected/pathman_calamity_2.out    | 1064 ++++++++++++++++
 expected/pathman_column_type.out   |    4 +
 expected/pathman_column_type_1.out |  203 ++++
 expected/pathman_hashjoin.out      |    3 +
 expected/pathman_hashjoin_1.out    |    3 +
 expected/pathman_hashjoin_2.out    |    3 +
 expected/pathman_hashjoin_3.out    |    3 +
 expected/pathman_hashjoin_4.out    |   81 ++
 expected/pathman_hashjoin_5.out    |   73 ++
 expected/pathman_inserts.out       |    4 +
 expected/pathman_inserts_1.out     |    4 +
 expected/pathman_inserts_2.out     | 1071 ++++++++++++++++
 expected/pathman_lateral.out       |    9 +-
 expected/pathman_lateral_2.out     |  127 ++
 expected/pathman_lateral_3.out     |  126 ++
 expected/pathman_mergejoin.out     |    7 +
 expected/pathman_mergejoin_1.out   |    7 +
 expected/pathman_mergejoin_2.out   |    7 +
 expected/pathman_mergejoin_3.out   |    7 +
 expected/pathman_mergejoin_4.out   |   84 ++
 expected/pathman_mergejoin_5.out   |   75 ++
 expected/pathman_only.out          |    3 +
 expected/pathman_only_1.out        |    3 +
 expected/pathman_only_2.out        |  280 +++++
 expected/pathman_rowmarks.out      |    3 +
 expected/pathman_rowmarks_1.out    |    3 +
 expected/pathman_rowmarks_2.out    |    3 +
 expected/pathman_rowmarks_3.out    |  390 ++++++
 run_tests.sh                       |    2 +
 sql/pathman_basic.sql              |    3 +
 sql/pathman_calamity.sql           |    3 +
 sql/pathman_column_type.sql        |    5 +
 sql/pathman_hashjoin.sql           |    3 +
 sql/pathman_inserts.sql            |    5 +
 sql/pathman_lateral.sql            |   10 +-
 sql/pathman_mergejoin.sql          |    7 +
 sql/pathman_only.sql               |    3 +
 sql/pathman_rowmarks.sql           |    3 +
 src/hooks.c                        |   48 +-
 src/include/compat/pg_compat.h     |   46 +-
 src/include/hooks.h                |   15 +-
 src/include/partition_filter.h     |    8 +-
 src/include/relation_info.h        |    7 +-
 src/init.c                         |   18 +-
 src/nodes_common.c                 |   19 +-
 src/partition_creation.c           |   51 +-
 src/partition_filter.c             |   52 +-
 src/pg_pathman.c                   |    8 +-
 src/pl_funcs.c                     |   57 +-
 src/pl_range_funcs.c               |   17 +-
 src/planner_tree_modification.c    |   14 +-
 src/rangeset.c                     |   15 +-
 src/relation_info.c                |   42 +-
 src/runtime_merge_append.c         |    8 +-
 src/utility_stmt_hooking.c         |    6 +-
 tests/cmocka/missing_basic.c       |    5 +
 tests/cmocka/missing_list.c        |  310 ++++-
 tests/python/Makefile              |    4 +-
 tests/python/partitioning_test.py  |   17 +-
 67 files changed, 6202 insertions(+), 100 deletions(-)
 create mode 100644 expected/pathman_basic_2.out
 create mode 100644 expected/pathman_calamity_2.out
 create mode 100644 expected/pathman_column_type_1.out
 create mode 100644 expected/pathman_hashjoin_4.out
 create mode 100644 expected/pathman_hashjoin_5.out
 create mode 100644 expected/pathman_inserts_2.out
 create mode 100644 expected/pathman_lateral_2.out
 create mode 100644 expected/pathman_lateral_3.out
 create mode 100644 expected/pathman_mergejoin_4.out
 create mode 100644 expected/pathman_mergejoin_5.out
 create mode 100644 expected/pathman_only_2.out
 create mode 100644 expected/pathman_rowmarks_3.out

diff --git a/.travis.yml b/.travis.yml
index ff2fac20..b020780b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,6 +18,8 @@ notifications:
     on_failure: always
 
 env:
+    - PG_VERSION=13 LEVEL=hardcore
+    - PG_VERSION=13
     - PG_VERSION=12 LEVEL=hardcore
     - PG_VERSION=12
     - PG_VERSION=11 LEVEL=hardcore
diff --git a/Makefile 
b/Makefile
index c1281871..9ec19548 100644
--- a/Makefile
+++ b/Makefile
@@ -66,7 +66,8 @@ REGRESS = pathman_array_qual \
 
 EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add
 
-EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output
+CMOCKA_EXTRA_CLEAN = missing_basic.o missing_list.o missing_stringinfo.o missing_bitmapset.o rangeset_tests.o rangeset_tests
+EXTRA_CLEAN = ./isolation_output $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN))
 
 ifdef USE_PGXS
 PG_CONFIG=pg_config
@@ -74,6 +75,7 @@ PGXS := $(shell $(PG_CONFIG) --pgxs)
 VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}')
 
 # check for declarative syntax
+# this feature will not be ported to >=12
 ifeq ($(VNUM),$(filter 10% 11%,$(VNUM)))
 REGRESS += pathman_declarative
 OBJS += src/declarative.o
diff --git a/README.md b/README.md
index b49c20ec..39ce5df9 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 
 ### NOTE: this project is not under development anymore
 
-`pg_pathman` supports Postgres versions [9.5..12], but most probably it won't be ported to 13 and later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here.
+`pg_pathman` supports Postgres versions [9.5..13], but most probably it won't be ported to 14 and later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`; we encourage users to switch to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here.
 
 # pg_pathman
 
@@ -13,7 +13,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions
 The extension is compatible with:
 
- * PostgreSQL 9.5, 9.6, 10, 11, 12;
+ * PostgreSQL 9.5, 9.6, 10, 11, 12, 13;
  * Postgres Pro Standard 9.5, 9.6, 10, 11, 12;
  * Postgres Pro Enterprise;
 
diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out
index aa5b5ab6..4117a00c 100644
--- a/expected/pathman_basic.out
+++ b/expected/pathman_basic.out
@@ -2,6 +2,9 @@
  * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
  * causing different output. Also, EXPLAIN now always shows key first in quals
  * ('test commutator' queries).
+ *
+ * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed,
+ * now it includes aliases for inherited tables.
  */
 \set VERBOSITY terse
 SET search_path = 'public';
diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out
index d1403c77..702f9027 100644
--- a/expected/pathman_basic_1.out
+++ b/expected/pathman_basic_1.out
@@ -2,6 +2,9 @@
  * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
  * causing different output. Also, EXPLAIN now always shows key first in quals
  * ('test commutator' queries).
+ *
+ * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed,
+ * now it includes aliases for inherited tables.
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out new file mode 100644 index 00000000..28e46c14 --- /dev/null +++ b/expected/pathman_basic_2.out @@ -0,0 +1,1819 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 hash_rel_1_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 hash_rel_1_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); 
+INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 
improved_dummy_1_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------- + Seq Scan on improved_dummy_11 + Filter: (id = 101) +(2 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test 
pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +--------------------------------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select insert_into_select_1 + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 insert_into_select_1_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) 
SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) 
+ +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id = 2500) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> 
Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) 
SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(4 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(3 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) + +SELECT 
pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition +------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT pathman.add_range_partition('test.range_rel', 
'2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition 
because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 
123 | 456 | 789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 + drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL 
PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +---------+------+----------+---------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT 
pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition +------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 
row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +------------------------------------------------------ + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on range_rel_15 + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_13 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 +(4 rows) + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE 
test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s special_case_1_ind_o_s_1 + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_1_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + 
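+/*
+ * Note on the plan below: with enable_parent set, the parent table itself
+ * appears in the Append alongside its children. Quals on the partitioning
+ * key (c1) are used to select partitions and are dropped for children whose
+ * range already satisfies them, while the c2 qual is served by each
+ * relation's own index on c2.
+ */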
+SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs index_on_childs_1 + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 32 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 0943bc5c..50bfd803 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -4,6 +4,9 @@ * ERROR: invalid input syntax for type integer: "abc" * instead of * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index b2e192e1..20c2ea6c 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -4,6 +4,9 @@ * ERROR: invalid input syntax for type integer: "abc" * instead of * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out new file mode 100644 index 00000000..0c7757a9 --- /dev/null +++ b/expected/pathman_calamity_2.out @@ -0,0 +1,1064 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.11 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + 
add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: relation "1" has no partitions +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: relation "pg_class" has no partitions +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + 
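+/*
+ * The two trailing arguments above map to "range_min" and "range_max" in
+ * the jsonb document passed to the callback; a NULL bound is rendered as a
+ * json null, as the next call shows for "range_min".
+ */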
+SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: relation "0" does not exist +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 
'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT 
get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on 
test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity CASCADE; +DROP 
EXTENSION pg_pathman; diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 3ae9355c..4e2f3ff6 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -1,3 +1,7 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_1.out b/expected/pathman_column_type_1.out new file mode 100644 index 00000000..d169719d --- /dev/null +++ b/expected/pathman_column_type_1.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +/* + * Get parsed and analyzed expression. + */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 
:varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. + */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 +(4 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 +(4 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 +(4 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from 
test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type CASCADE; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out index 1e5b2783..779efe3d 100644 --- a/expected/pathman_hashjoin.out +++ b/expected/pathman_hashjoin.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out index af569764..ae1edda6 100644 --- a/expected/pathman_hashjoin_1.out +++ b/expected/pathman_hashjoin_1.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out index c77146d1..21cd1883 100644 --- a/expected/pathman_hashjoin_2.out +++ b/expected/pathman_hashjoin_2.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out index 93613919..106e8c0e 100644 --- a/expected/pathman_hashjoin_3.out +++ b/expected/pathman_hashjoin_3.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out new file mode 100644 index 00000000..ad4b5651 --- /dev/null +++ b/expected/pathman_hashjoin_4.out @@ -0,0 +1,81 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
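+ * + * For illustration only (these two plan lines are hypothetical, not copied + * from this test): before 13 a child scan printed as + * "Index Scan using range_rel_1_pkey on range_rel_1", + * while from 13 on it also carries the query's alias, e.g. + * "Index Scan using range_rel_1_pkey on range_rel_1 j1_1".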
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2_1.dt + -> Hash Join + Hash Cond: (j1_1.id = j2_1.id) + -> Hash Join + Hash Cond: (j3_1.id = j1_1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2_1 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_2 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_3 +(20 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_hashjoin_5.out b/expected/pathman_hashjoin_5.out new file mode 100644 index 00000000..7bbea061 --- /dev/null +++ b/expected/pathman_hashjoin_5.out @@ -0,0 +1,73 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
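+ * + * A hedged observation, not an authoritative explanation: unlike + * pathman_hashjoin_4.out, the plan below reads test.range_rel only once + * (as j2, with an extra "id IS NOT NULL" filter) instead of hash-joining + * j1 with j2; this looks like output from servers that perform + * self-join removal.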
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3_1.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index cf05bd5a..225604c5 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -1,3 +1,7 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index fd54aeef..a6634edd 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -1,3 +1,7 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out new file mode 100644 index 00000000..9a439010 --- /dev/null +++ b/expected/pathman_inserts_2.out @@ -0,0 +1,1071 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
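+ * + * Reading hint: in the EXPLAIN (VERBOSE) plans near the end of this file, + * the NULL::integer entries in PartitionFilter's targetlists correspond to + * the columns 'a' and 'c', which are dropped in the course of this test.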
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONFLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..)
+ tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause a unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. | 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) + ?column?
| b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? +------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? 
+----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 
+ 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... | test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | 
test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? | b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + 
-> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, storage_11.e + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_1 storage_1_1 + Output: storage_1_1.b, storage_1_1.d, storage_1_1.e + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d, storage_2.e + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d, storage_3.e + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d, storage_4.e + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d, storage_5.e + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d, storage_6.e + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d, storage_7.e + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d, storage_8.e + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d, storage_9.e + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d, storage_10.e + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d, storage_12.e + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d, storage_13.e + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d, storage_14.e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d + -> Seq Scan on test_inserts.storage_1 storage_1_1 + Output: storage_1_1.b, storage_1_1.d + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d + -> Seq Scan on test_inserts.storage_14 + 
Output: storage_14.b, storage_14.d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_1 storage_1_1 + Output: storage_1_1.b + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_inserts CASCADE; +NOTICE: drop cascades to 19 other objects +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index 9bff1e57..0cb1a864 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -1,5 +1,10 @@ --- Sometimes join selectivity improvements patches in pgpro force nested loop --- members swap -- in pathman_lateral_1.out +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out new file mode 100644 index 00000000..5ee4104c --- /dev/null +++ b/expected/pathman_lateral_2.out @@ -0,0 +1,127 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + -> HashAggregate + Group Key: t_1.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Materialize + -> Nested Loop + Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: 
((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t_1.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_1 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_2 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_3 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_4 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_5 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_6 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_7 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_8 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_9 t3 + Filter: (t_1.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP SCHEMA test_lateral CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_3.out b/expected/pathman_lateral_3.out new file mode 100644 index 00000000..dd64819d --- /dev/null +++ b/expected/pathman_lateral_3.out @@ -0,0 +1,126 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + -> Nested Loop + Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t_1.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Custom Scan (RuntimeAppend) + Prune by: (t_1.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_1 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_2 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_3 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_4 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_5 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_6 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_7 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_8 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_9 t3 + Filter: (t_1.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP 
SCHEMA test_lateral CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out index 1bd9da6f..ca3a3d9d 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out index 5b903dc1..31da465a 100644 --- a/expected/pathman_mergejoin_1.out +++ b/expected/pathman_mergejoin_1.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out index 0168d556..4b614ad6 100644 --- a/expected/pathman_mergejoin_2.out +++ b/expected/pathman_mergejoin_2.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out index 3d4a441c..7003205f 100644 --- a/expected/pathman_mergejoin_3.out +++ b/expected/pathman_mergejoin_3.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out new file mode 100644 index 00000000..185aa3d1 --- /dev/null +++ b/expected/pathman_mergejoin_4.out @@ -0,0 +1,84 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2_1.dt + -> Merge Join + Merge Cond: (j2_1.id = j3_1.id) + -> Merge Join + Merge Cond: (j1_1.id = j2_1.id) + -> Merge Append + Sort Key: j1_1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 + -> Merge Append + Sort Key: j2_1.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2_1 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_2 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_3 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(20 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out new file mode 100644 index 00000000..6ffe89cd --- /dev/null +++ b/expected/pathman_mergejoin_5.out @@ -0,0 +1,75 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3_1.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_only.out b/expected/pathman_only.out index b54722d8..83425632 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -7,6 +7,9 @@ * optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out index fe64e5c9..da913e54 100644 --- a/expected/pathman_only_1.out +++ b/expected/pathman_only_1.out @@ -7,6 +7,9 @@ * optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out new file mode 100644 index 00000000..39b8f199 --- /dev/null +++ b/expected/pathman_only_2.out @@ -0,0 +1,280 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 
from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test from_only_test_12 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_2 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 
from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP SCHEMA test_only CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 4b51cb65..f9ef8114 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
*/ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index e72e7076..e0877333 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out index a111d688..7436b081 100644 --- a/expected/pathman_rowmarks_2.out +++ b/expected/pathman_rowmarks_2.out @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out new file mode 100644 index 00000000..6179ff94 --- /dev/null +++ b/expected/pathman_rowmarks_3.out @@ -0,0 +1,390 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differently on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +--------------------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 first_1_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN 
+----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP SCHEMA rowmarks CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table rowmarks.first +drop cascades to table rowmarks.second +drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP EXTENSION pg_pathman; diff --git a/run_tests.sh b/run_tests.sh index 
82d1f9d3..8f06d39c 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -134,6 +134,8 @@ make USE_PGXS=1 python_tests || status=$? deactivate set -x +if [ $status -ne 0 ]; then tail -n 2000 tests/python/tests.log; fi + # show Valgrind logs if necessary if [ "$LEVEL" = "nightmare" ]; then for f in $(find /tmp -name valgrind-*.log); do diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a164d421..403424f5 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -2,6 +2,9 @@ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output. Also, EXPLAIN now always shows key first in quals * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index c380ea1d..b49d061c 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -4,6 +4,9 @@ * ERROR: invalid input syntax for type integer: "abc" * instead of * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 98c73908..685643fd 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -1,3 +1,8 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index 8a08569f..2c3654d4 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 0f4859c4..c8c6439d 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -1,3 +1,8 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql index 645e5f93..d287c051 100644 --- a/sql/pathman_lateral.sql +++ b/sql/pathman_lateral.sql @@ -1,5 +1,11 @@ --- Sometimes join selectivity improvements patches in pgpro force nested loop --- members swap -- in pathman_lateral_1.out +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ + \set VERBOSITY terse diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index e85cc934..05de4ba2 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differently on PgPro + * --------------------------------------------- */ \set VERBOSITY terse diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index 6e34a9c1..53ef6a9a 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -7,6 +7,9 @@ * optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index f1ac0fe9..ab7f24ac 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index ca1db9be..e9ff1ed7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -3,7 +3,7 @@ * hooks.c * definitions of rel_pathlist and join_pathlist hooks * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -517,7 +517,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, } /* Parent has already been locked by rewriter */ - parent_rel = heap_open(rte->relid, NoLock); + parent_rel = heap_open_compat(rte->relid, NoLock); parent_rowmark = get_plan_rowmark(root->rowMarks, rti); @@ -537,7 +537,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, } /* Now close parent relation */ - heap_close(parent_rel, NoLock); + heap_close_compat(parent_rel, NoLock); /* Clear path list and make it point to NIL */ list_free_deep(rel->pathlist); @@ -673,9 +673,15 @@ execute_for_plantree(PlannedStmt *planned_stmt, * Planner hook. It disables inheritance for tables that have been partitioned * by pathman to prevent standard PostgreSQL partitioning mechanism from * handling those tables. + * + * Since >= 13 (6aba63ef3e6) a query_string parameter was added. 
*/ PlannedStmt * +#if PG_VERSION_NUM >= 130000 +pathman_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) +#else pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) +#endif { PlannedStmt *result; uint32 query_id = parse->queryId; @@ -696,9 +702,17 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Invoke original hook if needed */ if (pathman_planner_hook_next) +#if PG_VERSION_NUM >= 130000 + result = pathman_planner_hook_next(parse, query_string, cursorOptions, boundParams); +#else result = pathman_planner_hook_next(parse, cursorOptions, boundParams); +#endif else +#if PG_VERSION_NUM >= 130000 + result = standard_planner(parse, query_string, cursorOptions, boundParams); +#else result = standard_planner(parse, cursorOptions, boundParams); +#endif if (pathman_ready) { @@ -927,9 +941,21 @@ pathman_relcache_hook(Datum arg, Oid relid) /* * Utility function invoker hook. * NOTE: 'first_arg' is (PlannedStmt *) in PG 10, or (Node *) in PG <= 9.6. + * In PG 13 (2f9661311b8) command completion tags was reworked (added QueryCompletion struct) */ void -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 130000 +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *queryCompletion) +{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#elif PG_VERSION_NUM >= 100000 pathman_process_utility_hook(PlannedStmt *first_arg, const char *queryString, ProcessUtilityContext context, @@ -968,9 +994,14 @@ pathman_process_utility_hook(Node *first_arg, /* Handle our COPY case (and show a special cmd name) */ PathmanDoCopy((CopyStmt *) parsetree, queryString, stmt_location, stmt_len, &processed); +#if PG_VERSION_NUM >= 130000 + if (queryCompletion) + SetQueryCompletion(queryCompletion, CMDTAG_COPY, processed); +#else if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "COPY " UINT64_FORMAT, processed); +#endif return; /* don't call standard_ProcessUtility() or hooks */ } @@ -1037,10 +1068,19 @@ pathman_process_utility_hook(Node *first_arg, } /* Finally call process_utility_hook_next or standard_ProcessUtility */ +#if PG_VERSION_NUM >= 130000 + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + context, params, queryEnv, + dest, queryCompletion); +#else call_process_utility_compat((pathman_process_utility_hook_next ? 
pathman_process_utility_hook_next : standard_ProcessUtility), first_arg, queryString, context, params, queryEnv, dest, completionTag); +#endif } diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index c1805f80..24a36fea 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -3,7 +3,7 @@ * pg_compat.h * Compatibility tools for PostgreSQL API * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -240,7 +240,14 @@ /* * create_append_path() */ -#if PG_VERSION_NUM >= 120000 +#if PG_VERSION_NUM >= 130000 +/* + * PGPRO-3938 made create_append_path compatible with vanilla again + */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#elif PG_VERSION_NUM >= 120000 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ @@ -1058,5 +1065,40 @@ CustomEvalParamExternCompat(Param *param, void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +/* + * lnext() + * In >=13 list implementation was reworked (1cff1b95ab6) + */ +#if PG_VERSION_NUM >= 130000 +#define lnext_compat(l, lc) lnext((l), (lc)) +#else +#define lnext_compat(l, lc) lnext((lc)) +#endif + +/* + * heap_open() + * heap_openrv() + * heap_close() + * In >=13 heap_* was replaced with table_* (e0c4ec07284) + */ +#if PG_VERSION_NUM >= 130000 +#define heap_open_compat(r, l) table_open((r), (l)) +#define heap_openrv_compat(r, l) table_openrv((r), (l)) +#define heap_close_compat(r, l) table_close((r), (l)) +#else +#define heap_open_compat(r, l) heap_open((r), (l)) +#define heap_openrv_compat(r, l) heap_openrv((r), (l)) +#define heap_close_compat(r, l) heap_close((r), (l)) +#endif + +/* + * convert_tuples_by_name() + * In >=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + */ +#if PG_VERSION_NUM >= 130000 +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o)) +#else +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o), (m)) +#endif #endif /* PG_COMPAT_H */ diff --git a/src/include/hooks.h b/src/include/hooks.h index adf96d37..49d7e8f1 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -3,7 +3,7 @@ * hooks.h * prototypes of rel_pathlist and join_pathlist hooks * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -45,6 +45,9 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, void pathman_enable_assign_hook(bool newval, void *extra); PlannedStmt * pathman_planner_hook(Query *parse, +#if PG_VERSION_NUM >= 130000 + const char *query_string, +#endif int cursorOptions, ParamListInfo boundParams); @@ -55,7 +58,15 @@ void pathman_shmem_startup_hook(void); void pathman_relcache_hook(Datum arg, Oid relid); -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 130000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 100000 void pathman_process_utility_hook(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, diff --git 
a/src/include/partition_filter.h b/src/include/partition_filter.h index 0b32e575..233054b7 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -3,7 +3,7 @@ * partition_filter.h * Select partition for INSERT operation * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -31,7 +31,13 @@ #define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" +#if PG_VERSION_NUM < 130000 +/* + * In >=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + * and ERR_PART_DESC_CONVERT become unusable + */ #define ERR_PART_DESC_CONVERT "could not convert row type for partition" +#endif /* diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 80b92740..a42bf727 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -3,7 +3,7 @@ * relation_info.h * Data structures describing partitioned relations * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -309,9 +309,14 @@ PrelExpressionForRelid(const PartRelationInfo *prel, Index rti) return expr; } +#if PG_VERSION_NUM >= 130000 +AttrMap *PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc); +#else AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length); +#endif /* PartType wrappers */ diff --git a/src/init.c b/src/init.c index bd85c593..86e96ebe 100644 --- a/src/init.c +++ b/src/init.c @@ -3,7 +3,7 @@ * init.c * Initialization functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -470,7 +470,7 @@ find_inheritance_children_array(Oid parent_relid, */ ArrayAlloc(oidarr, maxoids, numoids, 32); - relation = heap_open(InheritsRelationId, AccessShareLock); + relation = heap_open_compat(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhparent, @@ -490,7 +490,7 @@ find_inheritance_children_array(Oid parent_relid, systable_endscan(scan); - heap_close(relation, AccessShareLock); + heap_close_compat(relation, AccessShareLock); /* * If we found more than one child, sort them by OID. This ensures @@ -655,7 +655,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, ObjectIdGetDatum(relid)); /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(false), AccessShareLock); + rel = heap_open_compat(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is of regclass type */ Assert(TupleDescAttr(RelationGetDescr(rel), @@ -703,7 +703,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, heap_endscan(scan); #endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); elog(DEBUG2, "PATHMAN_CONFIG %s relation %u", (contains_rel ? 
"contains" : "doesn't contain"), relid); @@ -734,7 +734,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - rel = heap_open(get_pathman_config_params_relid(false), AccessShareLock); + rel = heap_open_compat(get_pathman_config_params_relid(false), AccessShareLock); snapshot = RegisterSnapshot(GetLatestSnapshot()); #if PG_VERSION_NUM >= 120000 scan = table_beginscan(rel, snapshot, 1, key); @@ -764,7 +764,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) heap_endscan(scan); #endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); return row_found; } @@ -1118,7 +1118,7 @@ get_plpgsql_frontend_version(void) char *version_cstr; /* Look up the extension */ - pg_extension_rel = heap_open(ExtensionRelationId, AccessShareLock); + pg_extension_rel = heap_open_compat(ExtensionRelationId, AccessShareLock); ScanKeyInit(&skey, Anum_pg_extension_extname, @@ -1143,7 +1143,7 @@ get_plpgsql_frontend_version(void) version_cstr = text_to_cstring(DatumGetTextPP(datum)); systable_endscan(scan); - heap_close(pg_extension_rel, AccessShareLock); + heap_close_compat(pg_extension_rel, AccessShareLock); return build_semver_uint32(version_cstr); } diff --git a/src/nodes_common.c b/src/nodes_common.c index cf273fe6..c2a02649 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -3,7 +3,7 @@ * nodes_common.c * Common code for custom nodes * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -364,11 +364,19 @@ canonicalize_custom_exprs_mutator(Node *node, void *cxt) Var *var = palloc(sizeof(Var)); *var = *(Var *) node; +#if PG_VERSION_NUM >= 130000 +/* + * In >=13 (9ce77d75c5) varnoold and varoattno were changed to varnosyn and + * varattnosyn, and they are not consulted in _equalVar anymore. + */ + var->varattno = var->varattnosyn; +#else /* Replace original 'varnoold' */ var->varnoold = INDEX_VAR; /* Restore original 'varattno' */ var->varattno = var->varoattno; +#endif return (Node *) var; } @@ -822,9 +830,18 @@ explain_append_common(CustomScanState *node, char *exprstr; /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 +/* + * Since 6ef77cf46e8 + */ + deparse_context = set_deparse_context_plan(es->deparse_cxt, + node->ss.ps.plan, + ancestors); +#else deparse_context = set_deparse_context_planstate(es->deparse_cxt, (Node *) node, ancestors); +#endif /* Deparse the expression */ exprstr = deparse_expression((Node *) make_ands_explicit(custom_exprs), diff --git a/src/partition_creation.c b/src/partition_creation.c index cd2a7b82..c7a944a1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -3,7 +3,7 @@ * partition_creation.c * Various functions for partition creation. 
* - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * *------------------------------------------------------------------------- */ @@ -42,6 +42,9 @@ #include "parser/parse_utilcmd.h" #include "parser/parse_relation.h" #include "tcop/utility.h" +#if PG_VERSION_NUM >= 130000 +#include "utils/acl.h" +#endif #include "utils/builtins.h" #include "utils/datum.h" #include "utils/fmgroids.h" @@ -247,11 +250,11 @@ create_single_partition_common(Oid parent_relid, Relation child_relation; /* Open the relation and add new check constraint & fkeys */ - child_relation = heap_open(partition_relid, AccessExclusiveLock); + child_relation = heap_open_compat(partition_relid, AccessExclusiveLock); AddRelationNewConstraintsCompat(child_relation, NIL, list_make1(check_constraint), false, true, true); - heap_close(child_relation, NoLock); + heap_close_compat(child_relation, NoLock); /* Make constraint visible */ CommandCounterIncrement(); @@ -984,17 +987,17 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) Snapshot snapshot; /* Both parent & partition have already been locked */ - parent_rel = heap_open(parent_relid, NoLock); - partition_rel = heap_open(partition_relid, NoLock); + parent_rel = heap_open_compat(parent_relid, NoLock); + partition_rel = heap_open_compat(partition_relid, NoLock); make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars); - heap_close(parent_rel, NoLock); - heap_close(partition_rel, NoLock); + heap_close_compat(parent_rel, NoLock); + heap_close_compat(partition_rel, NoLock); /* Open catalog's relations */ - pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); - pg_attribute_rel = heap_open(AttributeRelationId, RowExclusiveLock); + pg_class_rel = heap_open_compat(RelationRelationId, RowExclusiveLock); + pg_attribute_rel = heap_open_compat(AttributeRelationId, RowExclusiveLock); /* Get most recent snapshot */ snapshot = RegisterSnapshot(GetLatestSnapshot()); @@ -1165,8 +1168,8 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) /* Don't forget to free snapshot */ UnregisterSnapshot(snapshot); - heap_close(pg_class_rel, RowExclusiveLock); - heap_close(pg_attribute_rel, RowExclusiveLock); + heap_close_compat(pg_class_rel, RowExclusiveLock); + heap_close_compat(pg_attribute_rel, RowExclusiveLock); } /* Copy foreign keys of parent table (updates pg_class) */ @@ -1235,7 +1238,7 @@ copy_rel_options(Oid parent_relid, Oid partition_relid) bool isnull[Natts_pg_class], replace[Natts_pg_class] = { false }; - pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); + pg_class_rel = heap_open_compat(RelationRelationId, RowExclusiveLock); parent_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); partition_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(partition_relid)); @@ -1273,7 +1276,7 @@ copy_rel_options(Oid parent_relid, Oid partition_relid) ReleaseSysCache(parent_htup); ReleaseSysCache(partition_htup); - heap_close(pg_class_rel, RowExclusiveLock); + heap_close_compat(pg_class_rel, RowExclusiveLock); /* Make changes visible */ CommandCounterIncrement(); @@ -1291,15 +1294,21 @@ void drop_pathman_check_constraint(Oid relid) { char *constr_name; +#if PG_VERSION_NUM >= 130000 + List *cmds; +#else AlterTableStmt *stmt; +#endif AlterTableCmd *cmd; /* Build a correct name for this constraint */ constr_name = build_check_constraint_name_relid_internal(relid); +#if PG_VERSION_NUM < 130000 stmt = makeNode(AlterTableStmt); stmt->relation = 
makeRangeVarFromRelid(relid); stmt->relkind = OBJECT_TABLE; +#endif cmd = makeNode(AlterTableCmd); cmd->subtype = AT_DropConstraint; @@ -1307,23 +1316,35 @@ drop_pathman_check_constraint(Oid relid) cmd->behavior = DROP_RESTRICT; cmd->missing_ok = true; +#if PG_VERSION_NUM >= 130000 + cmds = list_make1(cmd); + + /* + * Since 1281a5c907b AlterTable() was changed. + * recurse = true (see stmt->relation->inh makeRangeVarFromRelid() makeRangeVar()) + * Dropping constraint won't do parse analyze, so AlterTableInternal + * is enough. + */ + AlterTableInternal(relid, cmds, true); +#else stmt->cmds = list_make1(cmd); /* See function AlterTableGetLockLevel() */ AlterTable(relid, AccessExclusiveLock, stmt); +#endif } /* Add pg_pathman's check constraint using 'relid' */ void add_pathman_check_constraint(Oid relid, Constraint *constraint) { - Relation part_rel = heap_open(relid, AccessExclusiveLock); + Relation part_rel = heap_open_compat(relid, AccessExclusiveLock); AddRelationNewConstraintsCompat(part_rel, NIL, list_make1(constraint), false, true, true); - heap_close(part_rel, NoLock); + heap_close_compat(part_rel, NoLock); } diff --git a/src/partition_filter.c b/src/partition_filter.c index f6cb5b60..3808dc26 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -3,7 +3,7 @@ * partition_filter.c * Select partition for INSERT operation * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -233,7 +233,7 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) { ExecCloseIndices(rri_holder->result_rel_info); /* And relation itself */ - heap_close(rri_holder->result_rel_info->ri_RelationDesc, + heap_close_compat(rri_holder->result_rel_info->ri_RelationDesc, NoLock); } @@ -307,7 +307,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) base_rel = parts_storage->base_rri->ri_RelationDesc; /* Open child relation and check if it is a valid target */ - child_rel = heap_open(partid, NoLock); + child_rel = heap_open_compat(partid, NoLock); /* Build Var translation list for 'inserted_cols' */ make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); @@ -450,7 +450,7 @@ build_part_tuple_map(Relation base_rel, Relation child_rel) parent_tupdesc->tdtypeid = InvalidOid; /* Generate tuple transformation map and some other stuff */ - tuple_map = convert_tuples_by_name(parent_tupdesc, + tuple_map = convert_tuples_by_name_compat(parent_tupdesc, child_tupdesc, ERR_PART_DESC_CONVERT); @@ -592,6 +592,10 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, return result; } +/* + * Since 13 (e1551f96e64) AttrNumber[] and map_length was combined + * into one struct AttrMap + */ static ExprState * prepare_expr_state(const PartRelationInfo *prel, Relation source_rel, @@ -610,26 +614,44 @@ prepare_expr_state(const PartRelationInfo *prel, /* Should we try using map? 
*/ if (PrelParentRelid(prel) != RelationGetRelid(source_rel)) { +#if PG_VERSION_NUM >= 130000 + AttrMap *map; +#else AttrNumber *map; int map_length; +#endif TupleDesc source_tupdesc = RelationGetDescr(source_rel); /* Remap expression attributes for source relation */ +#if PG_VERSION_NUM >= 130000 + map = PrelExpressionAttributesMap(prel, source_tupdesc); +#else map = PrelExpressionAttributesMap(prel, source_tupdesc, &map_length); +#endif if (map) { bool found_whole_row; +#if PG_VERSION_NUM >= 130000 + expr = map_variable_attnos(expr, PART_EXPR_VARNO, 0, map, + InvalidOid, + &found_whole_row); +#else expr = map_variable_attnos_compat(expr, PART_EXPR_VARNO, 0, map, map_length, InvalidOid, &found_whole_row); +#endif if (found_whole_row) elog(ERROR, "unexpected whole-row reference" " found in partition key"); +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else pfree(map); +#endif } } @@ -1073,7 +1095,11 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, /* HACK: plan a fake query for FDW access to be planned as well */ elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); +#if PG_VERSION_NUM >= 130000 + plan = standard_planner(&query, NULL, 0, NULL); +#else plan = standard_planner(&query, 0, NULL); +#endif /* HACK: create a fake PlanState */ memset(&pstate, 0, sizeof(PlanState)); @@ -1147,7 +1173,11 @@ fix_returning_list_mutator(Node *node, void *state) for (i = 0; i < rri_holder->tuple_map->outdesc->natts; i++) { /* Good, 'varattno' of parent is child's 'i+1' */ +#if PG_VERSION_NUM >= 130000 + if (var->varattno == rri_holder->tuple_map->attrMap->attnums[i]) +#else if (var->varattno == rri_holder->tuple_map->attrMap[i]) +#endif { var->varattno = i + 1; /* attnos begin with 1 */ found_mapping = true; @@ -1189,19 +1219,25 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) /* Update estate_mod_data */ emd_struct->estate_not_modified = false; +#if PG_VERSION_NUM >= 120000 + estate->es_range_table_size = list_length(estate->es_range_table); +#endif +#if PG_VERSION_NUM >= 120000 && PG_VERSION_NUM < 130000 /* - * On PG >= 12, also add rte to es_range_table_array. This is horribly + * On PG = 12, also add rte to es_range_table_array. This is horribly * inefficient, yes. - * At least in 12 es_range_table_array ptr is not saved anywhere in + * In 12 es_range_table_array ptr is not saved anywhere in * core, so it is safe to repalloc. + * + * In >= 13 (3c92658) es_range_table_array was removed */ -#if PG_VERSION_NUM >= 120000 - estate->es_range_table_size = list_length(estate->es_range_table); estate->es_range_table_array = (RangeTblEntry **) repalloc(estate->es_range_table_array, estate->es_range_table_size * sizeof(RangeTblEntry *)); estate->es_range_table_array[estate->es_range_table_size - 1] = rte; +#endif +#if PG_VERSION_NUM >= 120000 /* * Also reallocate es_relations, because es_range_table_size defines its * len. This also ensures ExecEndPlan will close the rel. 
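The pattern above repeats throughout the series: a `*_compat` macro from pg_compat.h absorbs each API break so call sites stay identical on every supported version. A minimal sketch of such a call site (the `walk_list` helper below is hypothetical, not part of the patch; it only illustrates how `lnext_compat` from pg_compat.h is meant to be used):

```c
#include "postgres.h"
#include "nodes/pg_list.h"

#include "compat/pg_compat.h"

/* Hypothetical example: iterate a List the way the patched call sites do.
 * lnext_compat(list, cell) expands to lnext(list, cell) on PG >= 13, where
 * cells no longer carry a next pointer (1cff1b95ab6), and to lnext(cell)
 * on older versions, so this loop compiles unchanged on both. */
static void
walk_list(List *items)
{
	ListCell   *lc = list_head(items);

	while (lc != NULL)
	{
		void	   *item = lfirst(lc);

		(void) item;					/* process the item here */
		lc = lnext_compat(items, lc);	/* version-agnostic advance */
	}
}
```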
diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 285a130f..e3a46abd 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -400,7 +400,7 @@ get_pathman_schema(void) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ext_oid)); - rel = heap_open(ExtensionRelationId, AccessShareLock); + rel = heap_open_compat(ExtensionRelationId, AccessShareLock); scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, NULL, 1, entry); @@ -414,7 +414,7 @@ get_pathman_schema(void) systable_endscan(scandesc); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); return result; } @@ -483,7 +483,7 @@ append_child_relation(PlannerInfo *root, parent_rte = root->simple_rte_array[parent_rti]; /* Open child relation (we've just locked it) */ - child_relation = heap_open(child_oid, NoLock); + child_relation = heap_open_compat(child_oid, NoLock); /* Create RangeTblEntry for child relation */ child_rte = copyObject(parent_rte); @@ -678,7 +678,7 @@ append_child_relation(PlannerInfo *root, } /* Close child relations, but keep locks */ - heap_close(child_relation, NoLock); + heap_close_compat(child_relation, NoLock); return child_rti; } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ebf80861..76ecbe3d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -3,7 +3,7 @@ * pl_funcs.c * Utility C functions for stored procedures * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -367,6 +367,9 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) /* * List all existing partitions and their parents. + * + * In >=13 (bc8393cf277) struct SPITupleTable was changed + * (free removed and numvals added) */ Datum show_partition_list_internal(PG_FUNCTION_ARGS) @@ -389,7 +392,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt = (show_partition_list_cxt *) palloc(sizeof(show_partition_list_cxt)); /* Open PATHMAN_CONFIG with latest snapshot available */ - usercxt->pathman_config = heap_open(get_pathman_config_relid(false), + usercxt->pathman_config = heap_open_compat(get_pathman_config_relid(false), AccessShareLock); usercxt->snapshot = RegisterSnapshot(GetLatestSnapshot()); #if PG_VERSION_NUM >= 120000 @@ -433,7 +436,12 @@ show_partition_list_internal(PG_FUNCTION_ARGS) tuptable->tuptabcxt = tuptab_mcxt; /* Set up initial allocations */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced = PART_RELS_SIZE * CHILD_FACTOR; + tuptable->numvals = 0; +#else tuptable->alloced = tuptable->free = PART_RELS_SIZE * CHILD_FACTOR; +#endif tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple)); MemoryContextSwitchTo(old_mcxt); @@ -549,20 +557,34 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Form output tuple */ htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); +#if PG_VERSION_NUM >= 130000 + if (tuptable->numvals == tuptable->alloced) +#else if (tuptable->free == 0) +#endif { /* Double the size of the pointer array */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced += tuptable->alloced; +#else tuptable->free = tuptable->alloced; tuptable->alloced += tuptable->free; +#endif tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals, tuptable->alloced * sizeof(HeapTuple)); } +#if PG_VERSION_NUM >= 130000 + /* Add tuple to table and increase 'numvals' */ + tuptable->vals[tuptable->numvals] = htup; + (tuptable->numvals)++; +#else /* Add tuple to table and decrement 'free' */ tuptable->vals[tuptable->alloced - tuptable->free] = htup; 
(tuptable->free)--; +#endif MemoryContextSwitchTo(old_mcxt); @@ -577,7 +599,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) heap_endscan(usercxt->pathman_config_scan); #endif UnregisterSnapshot(usercxt->snapshot); - heap_close(usercxt->pathman_config, AccessShareLock); + heap_close_compat(usercxt->pathman_config, AccessShareLock); usercxt->child_number = 0; } @@ -587,7 +609,11 @@ show_partition_list_internal(PG_FUNCTION_ARGS) tuptable = usercxt->tuptable; /* Iterate through used slots */ +#if PG_VERSION_NUM >= 130000 + if (usercxt->child_number < tuptable->numvals) +#else if (usercxt->child_number < (tuptable->alloced - tuptable->free)) +#endif { HeapTuple htup = usercxt->tuptable->vals[usercxt->child_number++]; @@ -689,21 +715,34 @@ is_tuple_convertible(PG_FUNCTION_ARGS) { Relation rel1, rel2; +#if PG_VERSION_NUM >= 130000 + AttrMap *map; /* we don't actually need it */ +#else void *map; /* we don't actually need it */ +#endif - rel1 = heap_open(PG_GETARG_OID(0), AccessShareLock); - rel2 = heap_open(PG_GETARG_OID(1), AccessShareLock); + rel1 = heap_open_compat(PG_GETARG_OID(0), AccessShareLock); + rel2 = heap_open_compat(PG_GETARG_OID(1), AccessShareLock); /* Try to build a conversion map */ +#if PG_VERSION_NUM >= 130000 + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2)); +#else map = convert_tuples_by_name_map(RelationGetDescr(rel1), RelationGetDescr(rel2), ERR_PART_DESC_CONVERT); +#endif /* Now free map */ +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else pfree(map); +#endif - heap_close(rel1, AccessShareLock); - heap_close(rel2, AccessShareLock); + heap_close_compat(rel1, AccessShareLock); + heap_close_compat(rel2, AccessShareLock); /* still return true to avoid changing tests */ PG_RETURN_BOOL(true); @@ -852,12 +891,12 @@ add_to_pathman_config(PG_FUNCTION_ARGS) isnull[Anum_pathman_config_expr - 1] = false; /* Insert new row into PATHMAN_CONFIG */ - pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); + pathman_config = heap_open_compat(get_pathman_config_relid(false), RowExclusiveLock); htup = heap_form_tuple(RelationGetDescr(pathman_config), values, isnull); CatalogTupleInsert(pathman_config, htup); - heap_close(pathman_config, RowExclusiveLock); + heap_close_compat(pathman_config, RowExclusiveLock); /* Make changes visible */ CommandCounterIncrement(); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 27361dd3..12c247ab 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -3,7 +3,7 @@ * pl_range_funcs.c * Utility C functions for stored RANGE procedures * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -1320,12 +1320,18 @@ modify_range_constraint(Oid partition_relid, /* * Transform constraint into cstring + * + * In >=13 (5815696bc66) result type of addRangeTableEntryForRelationCompat() was changed */ static char * deparse_constraint(Oid relid, Node *expr) { Relation rel; +#if PG_VERSION_NUM >= 130000 + ParseNamespaceItem *nsitem; +#else RangeTblEntry *rte; +#endif Node *cooked_expr; ParseState *pstate; List *context; @@ -1333,12 +1339,17 @@ deparse_constraint(Oid relid, Node *expr) context = deparse_context_for(get_rel_name(relid), relid); - rel = heap_open(relid, NoLock); + rel = heap_open_compat(relid, NoLock); /* Initialize parse state */ pstate = make_parsestate(NULL); +#if PG_VERSION_NUM >= 130000 + nsitem = addRangeTableEntryForRelationCompat(pstate, rel, 
AccessShareLock, NULL, false, true); + addNSItemToQuery(pstate, nsitem, true, true, true); +#else rte = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); addRTEtoQuery(pstate, rte, true, true, true); +#endif /* Transform constraint into executable expression (i.e. cook it) */ cooked_expr = transformExpr(pstate, expr, EXPR_KIND_CHECK_CONSTRAINT); @@ -1346,7 +1357,7 @@ deparse_constraint(Oid relid, Node *expr) /* Transform expression into string */ result = deparse_expression(cooked_expr, context, false, false); - heap_close(rel, NoLock); + heap_close_compat(rel, NoLock); return result; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 6fc55c7b..77a55bd3 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -3,7 +3,7 @@ * planner_tree_modification.c * Functions for query- and plan- tree modification * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -588,8 +588,8 @@ handle_modification_query(Query *parse, transform_query_cxt *context) rte->inh = false; /* Both tables are already locked */ - child_rel = heap_open(child, NoLock); - parent_rel = heap_open(parent, NoLock); + child_rel = heap_open_compat(child, NoLock); + parent_rel = heap_open_compat(parent, NoLock); make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); @@ -611,8 +611,8 @@ handle_modification_query(Query *parse, transform_query_cxt *context) } /* Close relations (should remain locked, though) */ - heap_close(child_rel, NoLock); - heap_close(parent_rel, NoLock); + heap_close_compat(child_rel, NoLock); + heap_close_compat(parent_rel, NoLock); } } @@ -783,7 +783,7 @@ partition_filter_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); - lc3 = lnext(lc3); + lc3 = lnext_compat(modify_table->returningLists, lc3); } lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, @@ -849,7 +849,7 @@ partition_router_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); - lc3 = lnext(lc3); + lc3 = lnext_compat(modify_table->returningLists, lc3); } prouter = make_partition_router((Plan *) lfirst(lc1), diff --git a/src/rangeset.c b/src/rangeset.c index 15bb5849..9f7b2aa1 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -3,11 +3,12 @@ * rangeset.c * IndexRange functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" #include "rangeset.h" @@ -238,25 +239,25 @@ irange_list_union(List *a, List *b) if (irange_lower(lfirst_irange(ca)) <= irange_lower(lfirst_irange(cb))) { next = lfirst_irange(ca); - ca = lnext(ca); /* move to next cell */ + ca = lnext_compat(a, ca); /* move to next cell */ } else { next = lfirst_irange(cb); - cb = lnext(cb); /* move to next cell */ + cb = lnext_compat(b, cb); /* move to next cell */ } } /* Fetch next irange from A */ else if (ca) { next = lfirst_irange(ca); - ca = lnext(ca); /* move to next cell */ + ca = lnext_compat(a, ca); /* move to next cell */ } /* Fetch next irange from B */ else if (cb) { next = lfirst_irange(cb); - cb = lnext(cb); /* move to next cell */ + cb = lnext_compat(b, cb); /* move to next cell */ } /* Put this irange to 'cur' if don't have it yet */ @@ -339,9 +340,9 @@ 
irange_list_intersection(List *a, List *b) * irange is greater (or equal) to upper bound of current. */ if (irange_upper(ra) <= irange_upper(rb)) - ca = lnext(ca); + ca = lnext_compat(a, ca); if (irange_upper(ra) >= irange_upper(rb)) - cb = lnext(cb); + cb = lnext_compat(b, cb); } return result; } diff --git a/src/relation_info.c b/src/relation_info.c index 0c79b504..df60dde3 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -3,7 +3,7 @@ * relation_info.c * Data structures describing partitioned relations * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -925,16 +925,26 @@ shout_if_prel_is_invalid(const Oid parent_oid, * This is a simplified version of functions that return TupleConversionMap. * It should be faster if expression uses a few fields of relation. */ +#if PG_VERSION_NUM >= 130000 +AttrMap * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc) +#else AttrNumber * PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length) +#endif { Oid parent_relid = PrelParentRelid(prel); int source_natts = source_tupdesc->natts, expr_natts = 0; - AttrNumber *result, - i; +#if PG_VERSION_NUM >= 130000 + AttrMap *result; +#else + AttrNumber *result; +#endif + AttrNumber i; bool is_trivial = true; /* Get largest attribute number used in expression */ @@ -942,8 +952,12 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, while ((i = bms_next_member(prel->expr_atts, i)) >= 0) expr_natts = i; +#if PG_VERSION_NUM >= 130000 + result = make_attrmap(expr_natts); +#else /* Allocate array for map */ result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); +#endif /* Find a match for each attribute */ i = -1; @@ -964,26 +978,44 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, if (strcmp(NameStr(att->attname), attname) == 0) { +#if PG_VERSION_NUM >= 130000 + result->attnums[attnum - 1] = (AttrNumber) (j + 1); +#else result[attnum - 1] = (AttrNumber) (j + 1); +#endif break; } } +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] == 0) +#else if (result[attnum - 1] == 0) +#endif elog(ERROR, "cannot find column \"%s\" in child relation", attname); +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] != attnum) +#else if (result[attnum - 1] != attnum) +#endif is_trivial = false; } /* Check if map is trivial */ if (is_trivial) { +#if PG_VERSION_NUM >= 130000 + free_attrmap(result); +#else pfree(result); +#endif return NULL; } +#if PG_VERSION_NUM < 130000 *map_length = expr_natts; +#endif return result; } @@ -1330,7 +1362,7 @@ get_parent_of_partition(Oid partition) HeapTuple htup; Oid parent = InvalidOid; - relation = heap_open(InheritsRelationId, AccessShareLock); + relation = heap_open_compat(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhrelid, @@ -1359,7 +1391,7 @@ get_parent_of_partition(Oid partition) } systable_endscan(scan); - heap_close(relation, AccessShareLock); + heap_close_compat(relation, AccessShareLock); return parent; } diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 92ae3e60..601c663f 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -3,7 +3,7 @@ * runtime_merge_append.c * RuntimeMergeAppend node's function definitions and global variables * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * 
Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -898,9 +898,15 @@ show_sort_group_keys(PlanState *planstate, const char *qlabel, initStringInfo(&sortkeybuf); /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 + context = set_deparse_context_plan(es->deparse_cxt, + plan, + ancestors); +#else context = set_deparse_context_planstate(es->deparse_cxt, (Node *) planstate, ancestors); +#endif useprefix = (list_length(es->rtable) > 1 || es->verbose); for (keyno = 0; keyno < nkeys; keyno++) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2b5a5956..c9ffbf14 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -4,7 +4,7 @@ * Override COPY TO/FROM and ALTER TABLE ... RENAME statements * for partitioned tables * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -401,7 +401,7 @@ PathmanDoCopy(const CopyStmt *stmt, Assert(!stmt->query); /* Open the relation (we've locked it in is_pathman_related_copy()) */ - rel = heap_openrv(stmt->relation, NoLock); + rel = heap_openrv_compat(stmt->relation, NoLock); rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; @@ -468,7 +468,7 @@ PathmanDoCopy(const CopyStmt *stmt, } /* Close the relation, but keep it locked */ - heap_close(rel, (is_from ? NoLock : PATHMAN_COPY_READ_LOCK)); + heap_close_compat(rel, (is_from ? NoLock : PATHMAN_COPY_READ_LOCK)); } /* diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c index 7524abb5..36d76160 100644 --- a/tests/cmocka/missing_basic.c +++ b/tests/cmocka/missing_basic.c @@ -16,6 +16,11 @@ repalloc(void *pointer, Size size) return realloc(pointer, size); } +void +pfree(void *pointer) +{ + free(pointer); +} void ExceptionalCondition(const char *conditionName, diff --git a/tests/cmocka/missing_list.c b/tests/cmocka/missing_list.c index 5ddce8a8..b85eed94 100644 --- a/tests/cmocka/missing_list.c +++ b/tests/cmocka/missing_list.c @@ -1,10 +1,10 @@ /*------------------------------------------------------------------------- * * list.c - * implementation for PostgreSQL generic linked list package + * implementation for PostgreSQL generic list package * * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -13,10 +13,11 @@ * *------------------------------------------------------------------------- */ -#define _GNU_SOURCE #include "postgres.h" + #include "nodes/pg_list.h" +#if PG_VERSION_NUM < 130000 #define IsPointerList(l) ((l) == NIL || IsA((l), List)) #define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) @@ -141,3 +142,306 @@ lcons(void *datum, List *list) return list; } + +#else /* PG_VERSION_NUM >= 130000 */ + +/*------------------------------------------------------------------------- + * + * This was taken from src/backend/nodes/list.c PostgreSQL-13 source code. + * We only need lappend() and lcons() and their dependencies. + * There is one change: we use palloc() instead MemoryContextAlloc() in + * enlarge_list() (see #defines). 
+ * + *------------------------------------------------------------------------- + */ +#include "port/pg_bitutils.h" +#include "utils/memdebug.h" +#include "utils/memutils.h" + +#define MemoryContextAlloc(c, s) palloc(s) +#define GetMemoryChunkContext(l) 0 + +/* + * The previous List implementation, since it used a separate palloc chunk + * for each cons cell, had the property that adding or deleting list cells + * did not move the storage of other existing cells in the list. Quite a + * bit of existing code depended on that, by retaining ListCell pointers + * across such operations on a list. There is no such guarantee in this + * implementation, so instead we have debugging support that is meant to + * help flush out now-broken assumptions. Defining DEBUG_LIST_MEMORY_USAGE + * while building this file causes the List operations to forcibly move + * all cells in a list whenever a cell is added or deleted. In combination + * with MEMORY_CONTEXT_CHECKING and/or Valgrind, this can usually expose + * broken code. It's a bit expensive though, as there's many more palloc + * cycles and a lot more data-copying than in a default build. + * + * By default, we enable this when building for Valgrind. + */ +#ifdef USE_VALGRIND +#define DEBUG_LIST_MEMORY_USAGE +#endif + +/* Overhead for the fixed part of a List header, measured in ListCells */ +#define LIST_HEADER_OVERHEAD \ + ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1)) + +/* + * Macros to simplify writing assertions about the type of a list; a + * NIL list is considered to be an empty list of any type. + */ +#define IsPointerList(l) ((l) == NIL || IsA((l), List)) +#define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) +#define IsOidList(l) ((l) == NIL || IsA((l), OidList)) + +#ifdef USE_ASSERT_CHECKING +/* + * Check that the specified List is valid (so far as we can tell). + */ +static void +check_list_invariants(const List *list) +{ + if (list == NIL) + return; + + Assert(list->length > 0); + Assert(list->length <= list->max_length); + Assert(list->elements != NULL); + + Assert(list->type == T_List || + list->type == T_IntList || + list->type == T_OidList); +} +#else +#define check_list_invariants(l) ((void) 0) +#endif /* USE_ASSERT_CHECKING */ + +/* + * Return a freshly allocated List with room for at least min_size cells. + * + * Since empty non-NIL lists are invalid, new_list() sets the initial length + * to min_size, effectively marking that number of cells as valid; the caller + * is responsible for filling in their data. + */ +static List * +new_list(NodeTag type, int min_size) +{ + List *newlist; + int max_size; + + Assert(min_size > 0); + + /* + * We allocate all the requested cells, and possibly some more, as part of + * the same palloc request as the List header. This is a big win for the + * typical case of short fixed-length lists. It can lose if we allocate a + * moderately long list and then it gets extended; we'll be wasting more + * initial_elements[] space than if we'd made the header small. However, + * rounding up the request as we do in the normal code path provides some + * defense against small extensions. + */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * Normally, we set up a list with some extra cells, to allow it to grow + * without a repalloc. Prefer cell counts chosen to make the total + * allocation a power-of-2, since palloc would round it up to that anyway. 
+ * (That stops being true for very large allocations, but very long lists + * are infrequent, so it doesn't seem worth special logic for such cases.) + * + * The minimum allocation is 8 ListCell units, providing either 4 or 5 + * available ListCells depending on the machine's word width. Counting + * palloc's overhead, this uses the same amount of space as a one-cell + * list did in the old implementation, and less space for any longer list. + * + * We needn't worry about integer overflow; no caller passes min_size + * that's more than twice the size of an existing list, so the size limits + * within palloc will ensure that we don't overflow here. + */ + max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD)); + max_size -= LIST_HEADER_OVERHEAD; +#else + + /* + * For debugging, don't allow any extra space. This forces any cell + * addition to go through enlarge_list() and thus move the existing data. + */ + max_size = min_size; +#endif + + newlist = (List *) palloc(offsetof(List, initial_elements) + + max_size * sizeof(ListCell)); + newlist->type = type; + newlist->length = min_size; + newlist->max_length = max_size; + newlist->elements = newlist->initial_elements; + + return newlist; +} + +/* + * Enlarge an existing non-NIL List to have room for at least min_size cells. + * + * This does *not* update list->length, as some callers would find that + * inconvenient. (list->length had better be the correct number of existing + * valid cells, though.) + */ +static void +enlarge_list(List *list, int min_size) +{ + int new_max_len; + + Assert(min_size > list->max_length); /* else we shouldn't be here */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * As above, we prefer power-of-two total allocations; but here we need + * not account for list header overhead. + */ + + /* clamp the minimum value to 16, a semi-arbitrary small power of 2 */ + new_max_len = pg_nextpower2_32(Max(16, min_size)); + +#else + /* As above, don't allocate anything extra */ + new_max_len = min_size; +#endif + + if (list->elements == list->initial_elements) + { + /* + * Replace original in-line allocation with a separate palloc block. + * Ensure it is in the same memory context as the List header. (The + * previous List implementation did not offer any guarantees about + * keeping all list cells in the same context, but it seems reasonable + * to create such a guarantee now.) + */ + list->elements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(list->elements, list->initial_elements, + list->length * sizeof(ListCell)); + + /* + * We must not move the list header, so it's unsafe to try to reclaim + * the initial_elements[] space via repalloc. In debugging builds, + * however, we can clear that space and/or mark it inaccessible. + * (wipe_mem includes VALGRIND_MAKE_MEM_NOACCESS.) + */ +#ifdef CLOBBER_FREED_MEMORY + wipe_mem(list->initial_elements, + list->max_length * sizeof(ListCell)); +#else + VALGRIND_MAKE_MEM_NOACCESS(list->initial_elements, + list->max_length * sizeof(ListCell)); +#endif + } + else + { +#ifndef DEBUG_LIST_MEMORY_USAGE + /* Normally, let repalloc deal with enlargement */ + list->elements = (ListCell *) repalloc(list->elements, + new_max_len * sizeof(ListCell)); +#else + /* + * repalloc() might enlarge the space in-place, which we don't want + * for debugging purposes, so forcibly move the data somewhere else. 
+ */ + ListCell *newelements; + + newelements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(newelements, list->elements, + list->length * sizeof(ListCell)); + pfree(list->elements); + list->elements = newelements; +#endif + } + + list->max_length = new_max_len; +} + +/* + * Make room for a new head cell in the given (non-NIL) list. + * + * The data in the new head cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_head_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + /* Now shove the existing data over */ + memmove(&list->elements[1], &list->elements[0], + list->length * sizeof(ListCell)); + list->length++; +} + +/* + * Make room for a new tail cell in the given (non-NIL) list. + * + * The data in the new tail cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_tail_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + list->length++; +} + +/* + * Append a pointer to the list. A pointer to the modified list is + * returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * first argument. + */ +List * +lappend(List *list, void *datum) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_tail_cell(list); + + lfirst(list_tail(list)) = datum; + check_list_invariants(list); + return list; +} + +/* + * Prepend a new element to the list. A pointer to the modified list + * is returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * second argument. + * + * Caution: before Postgres 8.0, the original List was unmodified and + * could be considered to retain its separate identity. This is no longer + * the case. 
+ */ +List * +lcons(void *datum, List *list) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_head_cell(list); + + lfirst(list_head(list)) = datum; + check_list_invariants(list); + return list; +} + +#endif /* PG_VERSION_NUM */ diff --git a/tests/python/Makefile b/tests/python/Makefile index fed17cf3..8311bb12 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,6 +1,6 @@ partitioning_tests: ifneq ($(CASE),) - python3 partitioning_test.py Tests.$(CASE) + python3 -u partitioning_test.py Tests.$(CASE) else - python3 partitioning_test.py + python3 -u partitioning_test.py endif diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0e3d1492..ad555455 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -4,7 +4,7 @@ partitioning_test.py Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2020, Postgres Professional """ import functools @@ -21,10 +21,11 @@ import unittest from distutils.version import LooseVersion -from testgres import get_new_node, get_pg_version +from testgres import get_new_node, get_pg_version, configure_testgres -# set setup base logging config, it can be turned on by `use_logging` +# set setup base logging config, it can be turned on by `use_python_logging` # parameter on node setup +# configure_testgres(use_python_logging=True) import logging import logging.config @@ -548,7 +549,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) # Check count of returned tuples count = con.execute( @@ -601,7 +602,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) # Check tuples returned by query above res_tuples = con.execute( @@ -1128,4 +1129,8 @@ def make_updates(node, count): else: suite = unittest.TestLoader().loadTestsFromTestCase(Tests) - unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) + configure_testgres(use_python_logging=True) + + result = unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) + if not result.wasSuccessful(): + sys.exit(1) From e0171c8ef1b7d0b9143fa800364a60b083e33ef6 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 27 Oct 2020 17:50:38 +0300 Subject: [PATCH 446/528] Fix for CVE-2020-14350. - Explicit casts to ensure exact match to pathman functions instead of pwning ones. - Explicit use of @extschema@ and pg_catalog schemas where possible (except for operators). - Replace unsafe OR REPLACE clause. This is believed to remove the possibility of malicious internal functions overloading. For more information, see the documentation: 37.17.6.2. Security Considerations for Extension Scripts (https://www.postgresql.org/docs/current/extend-extensions.html#EXTEND-EXTENSIONS-SECURITY) 5.9.6. 
Usage Patterns (https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATTERNS) --- Makefile | 3 +- README.md | 22 +++-- expected/pathman_CVE-2020-14350.out | 115 +++++++++++++++++++++ hash.sql | 22 ++--- init.sql | 148 ++++++++++++++-------------- range.sql | 100 +++++++++---------- sql/pathman_CVE-2020-14350.sql | 77 +++++++++++++++ src/partition_creation.c | 2 +- src/pathman_workers.c | 2 +- 9 files changed, 346 insertions(+), 145 deletions(-) create mode 100644 expected/pathman_CVE-2020-14350.out create mode 100644 sql/pathman_CVE-2020-14350.sql diff --git a/Makefile b/Makefile index c1281871..b198a6a1 100644 --- a/Makefile +++ b/Makefile @@ -61,7 +61,8 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views + pathman_views \ + pathman_CVE-2020-14350 EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/README.md b/README.md index b49c20ec..2f95a738 100644 --- a/README.md +++ b/README.md @@ -95,11 +95,19 @@ shared_preload_libraries = 'pg_pathman' It is essential to restart the PostgreSQL instance. After that, execute the following query in psql: ```plpgsql -CREATE EXTENSION pg_pathman; +CREATE SCHEMA pathman; +GRANT USAGE ON SCHEMA pathman TO PUBLIC; +CREATE EXTENSION pg_pathman WITH SCHEMA pathman; ``` Done! Now it's time to setup your partitioning schemes. +> **Security notice**: pg_pathman is believed to be secure against +search-path-based attacks mentioned in Postgres +[documentation](https://www.postgresql.org/docs/current/sql-createextension.html). However, +if *your* calls of pathman's functions don't exactly match the signatures, they +might be vulnerable to malicious overloading. If in doubt, install pathman into a clean schema where nobody except superusers has CREATE permission, to avoid problems. + > **Windows-specific**: pg_pathman imports several symbols (e.g. None_Receiver, InvalidObjectAddress) from PostgreSQL, which is fine by itself, but requires that those symbols are marked as `PGDLLIMPORT`. Unfortunately, some of them are not exported from vanilla PostgreSQL, which means that you have to either use Postgres Pro Standard/Enterprise (which includes all necessary patches), or patch and build your own distribution of PostgreSQL. ## How to update @@ -611,7 +619,7 @@ SELECT tableoid::regclass AS partition, * FROM partitioned_table; - All running concurrent partitioning tasks can be listed using the `pathman_concurrent_part_tasks` view: ```plpgsql SELECT * FROM pathman_concurrent_part_tasks; - userid | pid | dbid | relid | processed | status + userid | pid | dbid | relid | processed | status --------+------+-------+-------+-----------+--------- dmitry | 7367 | 16384 | test | 472000 | working (1 row) @@ -625,7 +633,7 @@ WHERE parent = 'part_test'::regclass AND range_min::int < 500; NOTICE: 1 rows copied from part_test_11 NOTICE: 100 rows copied from part_test_1 NOTICE: 100 rows copied from part_test_2 - drop_range_partition + drop_range_partition ---------------------- dummy_test_11 dummy_test_1 @@ -780,8 +788,8 @@ All sections and data will remain unchanged and will be handled by the standard Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page.
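To make the security notice above concrete, a caller defeats search-path tricks by schema-qualifying the function and matching its declared argument types exactly. A short sketch (the `journal` table name is hypothetical; `create_hash_partitions(REGCLASS, TEXT, INT4, ...)` is the signature declared in hash.sql later in this patch):

```plpgsql
-- With the extension installed in the dedicated 'pathman' schema,
-- qualify the call and cast arguments to the exact declared types,
-- so a malicious overload earlier on search_path cannot capture it:
SELECT pathman.create_hash_partitions('public.journal'::regclass,
                                      'id'::text,
                                      4::int4);
```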
## Authors -[Ildar Musin](https://github.com/zilder) -Alexander Korotkov Postgres Professional Ltd., Russia -[Dmitry Ivanov](https://github.com/funbringer) -Maksim Milyutin Postgres Professional Ltd., Russia +[Ildar Musin](https://github.com/zilder) +Alexander Korotkov Postgres Professional Ltd., Russia +[Dmitry Ivanov](https://github.com/funbringer) +Maksim Milyutin Postgres Professional Ltd., Russia [Ildus Kurbangaliev](https://github.com/ildus) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out new file mode 100644 index 00000000..c91a280f --- /dev/null +++ b/expected/pathman_CVE-2020-14350.out @@ -0,0 +1,115 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. + */ +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS regress_hacker; +SET client_min_messages = 'notice'; +CREATE EXTENSION pg_pathman; +CREATE ROLE regress_hacker LOGIN; +-- Test 1 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +NOTICE: worker started, you can stop it with the following command: select public.stop_concurrent_part_task('test1'); + partition_table_concurrently +------------------------------ + +(1 row) + +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Test 2 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test2 values(1); +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Cleanup +RESET ROLE; +DROP FUNCTION _partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE test1 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table test1_0 +drop cascades to table test1_1 +drop cascades to table test1_2 +drop cascades 
to table test1_3 +drop cascades to table test1_4 +DROP TABLE test2 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to sequence test2_seq +drop cascades to table test2_1 +drop cascades to table test2_2 +DROP ROLE regress_hacker; +DROP EXTENSION pg_pathman; diff --git a/hash.sql b/hash.sql index 45c9b71d..b22fd75e 100644 --- a/hash.sql +++ b/hash.sql @@ -3,7 +3,7 @@ * hash.sql * HASH partitioning functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,7 +11,7 @@ /* * Creates hash partitions for specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( +CREATE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, expression TEXT, partitions_count INT4, @@ -53,7 +53,7 @@ SET client_min_messages = WARNING; * * lock_parent - should we take an exclusive lock? */ -CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( +CREATE FUNCTION @extschema@.replace_hash_partition( old_partition REGCLASS, new_partition REGCLASS, lock_parent BOOL DEFAULT TRUE) @@ -110,18 +110,18 @@ BEGIN /* Fetch definition of old_partition's HASH constraint */ SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint - WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name + WHERE conrelid = old_partition AND pg_catalog.quote_ident(conname) = old_constr_name INTO old_constr_def; /* Detach old partition */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s DROP CONSTRAINT %s', old_partition, old_constr_name); /* Attach the new one */ - EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s %s', new_partition, @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); @@ -146,7 +146,7 @@ $$ LANGUAGE plpgsql; /* * Just create HASH partitions, called by create_hash_partitions(). */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( +CREATE FUNCTION @extschema@.create_hash_partitions_internal( parent_relid REGCLASS, attribute TEXT, partitions_count INT4, @@ -158,14 +158,14 @@ LANGUAGE C; /* * Calculates hash for integer value */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) +CREATE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; /* * Build hash condition for a CHECK CONSTRAINT */ -CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( +CREATE FUNCTION @extschema@.build_hash_condition( attribute_type REGTYPE, attribute TEXT, partitions_count INT4, diff --git a/init.sql b/init.sql index 16ec0b8f..123b2a36 100644 --- a/init.sql +++ b/init.sql @@ -3,7 +3,7 @@ * init.sql * Creates config table and provides common utility functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -14,7 +14,7 @@ * to partitioning key. 
The function throws an error if it fails to convert * text to Datum */ -CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( +CREATE FUNCTION @extschema@.validate_interval_value( partrel REGCLASS, expr TEXT, parttype INTEGER, @@ -31,7 +31,7 @@ LANGUAGE C; * range_interval - base interval for RANGE partitioning as string * cooked_expr - cooked partitioning expression (parsed & rewritten) */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( +CREATE TABLE @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, expr TEXT NOT NULL, parttype INTEGER NOT NULL, @@ -55,7 +55,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( * * NOTE: this function is used in CHECK CONSTRAINT. */ -CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( +CREATE FUNCTION @extschema@.validate_part_callback( callback REGPROCEDURE, raise_error BOOL DEFAULT TRUE) RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' @@ -70,7 +70,7 @@ LANGUAGE C STRICT; * init_callback - text signature of cb to be executed on partition creation * spawn_using_bgw - use background worker in order to auto create partitions */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( +CREATE TABLE @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT FALSE, auto BOOLEAN NOT NULL DEFAULT TRUE, @@ -91,7 +91,7 @@ TO public; /* * Check if current user can alter/drop specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) +CREATE FUNCTION @extschema@.check_security_policy(relation regclass) RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; /* @@ -113,7 +113,7 @@ ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; /* * Invalidate relcache every time someone changes parameters config or pathman_config */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() +CREATE FUNCTION @extschema@.pathman_config_params_trigger_func() RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' LANGUAGE C; @@ -135,13 +135,13 @@ SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', /* * Add a row describing the optional parameter to pathman_config_params. */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( +CREATE FUNCTION @extschema@.pathman_set_param( relation REGCLASS, param TEXT, value ANYELEMENT) RETURNS VOID AS $$ BEGIN - EXECUTE format('INSERT INTO @extschema@.pathman_config_params + EXECUTE pg_catalog.format('INSERT INTO @extschema@.pathman_config_params (partrel, %1$s) VALUES ($1, $2) ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) USING relation, value; @@ -151,7 +151,7 @@ $$ LANGUAGE plpgsql; /* * Include\exclude parent relation in query plan. */ -CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( +CREATE FUNCTION @extschema@.set_enable_parent( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -163,7 +163,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Enable\disable automatic partition creation. 
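 *
 * Usage sketch (hypothetical partitioned table 'big_log'; assumes the
 * extension schema is on the search_path):
 *   SELECT set_auto('big_log', false);  -- stop auto-creating partitions
 *   SELECT set_auto('big_log', true);   -- turn it back on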
*/ -CREATE OR REPLACE FUNCTION @extschema@.set_auto( +CREATE FUNCTION @extschema@.set_auto( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -175,7 +175,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set partition creation callback */ -CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( +CREATE FUNCTION @extschema@.set_init_callback( relation REGCLASS, callback REGPROCEDURE DEFAULT 0) RETURNS VOID AS $$ @@ -186,10 +186,10 @@ BEGIN /* Fetch schema-qualified name of callback */ IF callback != 0 THEN - SELECT quote_ident(nspname) || '.' || - quote_ident(proname) || '(' || - (SELECT string_agg(x.argtype::REGTYPE::TEXT, ',') - FROM unnest(proargtypes) AS x(argtype)) || + SELECT pg_catalog.quote_ident(nspname) || '.' || + pg_catalog.quote_ident(proname) || '(' || + (SELECT pg_catalog.string_agg(x.argtype::REGTYPE::TEXT, ',') + FROM pg_catalog.unnest(proargtypes) AS x(argtype)) || ')' FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace @@ -204,7 +204,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set 'spawn using BGW' option */ -CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( +CREATE FUNCTION @extschema@.set_spawn_using_bgw( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -216,7 +216,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set (or reset) default interval for auto created partitions */ -CREATE OR REPLACE FUNCTION @extschema@.set_interval( +CREATE FUNCTION @extschema@.set_interval( relation REGCLASS, value ANYELEMENT) RETURNS VOID AS $$ @@ -240,7 +240,7 @@ $$ LANGUAGE plpgsql; /* * Show all existing parents and partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +CREATE FUNCTION @extschema@.show_partition_list() RETURNS TABLE ( parent REGCLASS, partition REGCLASS, @@ -254,7 +254,7 @@ LANGUAGE C STRICT; /* * View for show_partition_list(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +CREATE VIEW @extschema@.pathman_partition_list AS SELECT * FROM @extschema@.show_partition_list(); GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; @@ -262,7 +262,7 @@ GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; /* * Show memory usage of pg_pathman's caches. */ -CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +CREATE FUNCTION @extschema@.show_cache_stats() RETURNS TABLE ( context TEXT, size INT8, @@ -274,13 +274,13 @@ LANGUAGE C STRICT; /* * View for show_cache_stats(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +CREATE VIEW @extschema@.pathman_cache_stats AS SELECT * FROM @extschema@.show_cache_stats(); /* * Show all existing concurrent partitioning tasks. */ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() RETURNS TABLE ( userid REGROLE, pid INT, @@ -294,7 +294,7 @@ LANGUAGE C STRICT; /* * View for show_concurrent_part_tasks(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks +CREATE VIEW @extschema@.pathman_concurrent_part_tasks AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; @@ -302,7 +302,7 @@ GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; /* * Partition table using ConcurrentPartWorker. */ -CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( +CREATE FUNCTION @extschema@.partition_table_concurrently( relation REGCLASS, batch_size INTEGER DEFAULT 1000, sleep_time FLOAT8 DEFAULT 1.0) @@ -312,7 +312,7 @@ LANGUAGE C STRICT; /* * Stop concurrent partitioning task. 
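 *
 * Pairing sketch (names taken from the regression output above, where the
 * NOTICE itself prints the stop command):
 *   SELECT partition_table_concurrently('test1', 10, 1);  -- spawn bgworker
 *   SELECT stop_concurrent_part_task('test1');            -- cancel it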
*/ -CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( +CREATE FUNCTION @extschema@.stop_concurrent_part_task( relation REGCLASS) RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' LANGUAGE C STRICT; @@ -321,7 +321,7 @@ LANGUAGE C STRICT; /* * Copy rows to partitions concurrently. */ -CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( +CREATE FUNCTION @extschema@._partition_data_concurrent( relation REGCLASS, p_min ANYELEMENT DEFAULT NULL::text, p_max ANYELEMENT DEFAULT NULL::text, @@ -341,19 +341,19 @@ BEGIN /* Format LIMIT clause if needed */ IF NOT p_limit IS NULL THEN - v_limit_clause := format('LIMIT %s', p_limit); + v_limit_clause := pg_catalog.format('LIMIT %s', p_limit); END IF; /* Format WHERE clause if needed */ IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', part_expr); + v_where_clause := pg_catalog.format('%1$s >= $1', part_expr); END IF; IF NOT p_max IS NULL THEN IF NOT p_min IS NULL THEN v_where_clause := v_where_clause || ' AND '; END IF; - v_where_clause := v_where_clause || format('%1$s < $2', part_expr); + v_where_clause := v_where_clause || pg_catalog.format('%1$s < $2', part_expr); END IF; IF v_where_clause != '' THEN @@ -362,12 +362,12 @@ BEGIN /* Lock rows and copy data */ RAISE NOTICE 'Copying data to partitions...'; - EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + EXECUTE pg_catalog.format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', relation, v_where_clause, v_limit_clause) USING p_min, p_max INTO ctids; - EXECUTE format('WITH data AS ( + EXECUTE pg_catalog.format('WITH data AS ( DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) INSERT INTO %1$s SELECT * FROM data', relation) @@ -383,7 +383,7 @@ SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O /* * Old school way to distribute rows to partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.partition_data( +CREATE FUNCTION @extschema@.partition_data( parent_relid REGCLASS, OUT p_total BIGINT) AS $$ @@ -391,7 +391,7 @@ BEGIN p_total := 0; /* Create partitions and copy rest of the data */ - EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + EXECUTE pg_catalog.format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) INSERT INTO %1$s SELECT * FROM part_data', parent_relid::TEXT); @@ -405,7 +405,7 @@ SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O /* * Disable pathman partitioning for specified relation. */ -CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( +CREATE FUNCTION @extschema@.disable_pathman_for( parent_relid REGCLASS) RETURNS VOID AS $$ BEGIN @@ -420,7 +420,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Check a few things and take locks before partitioning. */ -CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( +CREATE FUNCTION @extschema@.prepare_for_partitioning( parent_relid REGCLASS, expression TEXT, partition_data BOOLEAN) @@ -455,7 +455,7 @@ BEGIN RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; - IF EXISTS (SELECT 1 FROM pg_inherits WHERE inhparent = parent_relid) THEN + IF EXISTS (SELECT 1 FROM pg_catalog.pg_inherits WHERE inhparent = parent_relid) THEN RAISE EXCEPTION 'can''t partition table "%" with existing children', parent_relid; END IF; @@ -478,7 +478,7 @@ $$ LANGUAGE plpgsql; /* * Returns relname without quotes or something. 
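 *
 * Usage sketch (hypothetical relation):
 *   SELECT * FROM get_plain_schema_and_relname('public.big_log');
 *   -- schema = 'public', relname = 'big_log'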
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( +CREATE FUNCTION @extschema@.get_plain_schema_and_relname( cls REGCLASS, OUT schema TEXT, OUT relname TEXT) @@ -494,7 +494,7 @@ $$ LANGUAGE plpgsql STRICT; /* * DDL trigger that removes entry from pathman_config table. */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +CREATE FUNCTION @extschema@.pathman_ddl_trigger_func() RETURNS event_trigger AS $$ DECLARE obj RECORD; @@ -505,8 +505,8 @@ BEGIN pg_class_oid = 'pg_catalog.pg_class'::regclass; /* Find relids to remove from config */ - SELECT array_agg(cfg.partrel) INTO relids - FROM pg_event_trigger_dropped_objects() AS events + SELECT pg_catalog.array_agg(cfg.partrel) INTO relids + FROM pg_catalog.pg_event_trigger_dropped_objects() AS events JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid WHERE events.classid = pg_class_oid AND events.objsubid = 0; @@ -522,7 +522,7 @@ $$ LANGUAGE plpgsql; * Drop partitions. If delete_data set to TRUE, partitions * will be dropped with all the data. */ -CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( +CREATE FUNCTION @extschema@.drop_partitions( parent_relid REGCLASS, delete_data BOOLEAN DEFAULT FALSE) RETURNS INTEGER AS $$ @@ -552,7 +552,7 @@ BEGIN ORDER BY inhrelid ASC) LOOP IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, child::TEXT); GET DIAGNOSTICS rows_count = ROW_COUNT; @@ -571,9 +571,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. */ IF rel_kind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', child); + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', child); ELSE - EXECUTE format('DROP TABLE %s', child); + EXECUTE pg_catalog.format('DROP TABLE %s', child); END IF; part_count := part_count + 1; @@ -592,7 +592,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is /* * Copy all of parent's foreign keys. 
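 *
 * Usage sketch (hypothetical names): after creating or attaching a child
 * table by hand, mirror the parent's FK constraints onto it:
 *   SELECT copy_foreign_keys('big_log', 'big_log_extra');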
*/ -CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( +CREATE FUNCTION @extschema@.copy_foreign_keys( parent_relid REGCLASS, partition_relid REGCLASS) RETURNS VOID AS $$ @@ -606,7 +606,7 @@ BEGIN FOR conid IN (SELECT oid FROM pg_catalog.pg_constraint WHERE conrelid = parent_relid AND contype = 'f') LOOP - EXECUTE format('ALTER TABLE %s ADD %s', + EXECUTE pg_catalog.format('ALTER TABLE %s ADD %s', partition_relid::TEXT, pg_catalog.pg_get_constraintdef(conid)); END LOOP; @@ -617,7 +617,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set new relname, schema and tablespace */ -CREATE OR REPLACE FUNCTION @extschema@.alter_partition( +CREATE FUNCTION @extschema@.alter_partition( relation REGCLASS, new_name TEXT, new_schema REGNAMESPACE, @@ -634,17 +634,17 @@ BEGIN /* Alter table name */ IF new_name != orig_name THEN - EXECUTE format('ALTER TABLE %s RENAME TO %s', relation, new_name); + EXECUTE pg_catalog.format('ALTER TABLE %s RENAME TO %s', relation, new_name); END IF; /* Alter table schema */ IF new_schema != orig_schema THEN - EXECUTE format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); + EXECUTE pg_catalog.format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); END IF; /* Move to another tablespace */ IF NOT new_tablespace IS NULL THEN - EXECUTE format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); + EXECUTE pg_catalog.format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); END IF; END $$ LANGUAGE plpgsql; @@ -661,7 +661,7 @@ EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); /* * Get partitioning key. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( +CREATE FUNCTION @extschema@.get_partition_key( parent_relid REGCLASS) RETURNS TEXT AS $$ @@ -674,7 +674,7 @@ LANGUAGE sql STRICT; /* * Get partitioning key type. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( +CREATE FUNCTION @extschema@.get_partition_key_type( parent_relid REGCLASS) RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; @@ -682,7 +682,7 @@ LANGUAGE C STRICT; /* * Get parsed and analyzed expression. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( +CREATE FUNCTION @extschema@.get_partition_cooked_key( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' LANGUAGE C STRICT; @@ -690,7 +690,7 @@ LANGUAGE C STRICT; /* * Get partitioning type. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( +CREATE FUNCTION @extschema@.get_partition_type( parent_relid REGCLASS) RETURNS INT4 AS $$ @@ -703,11 +703,11 @@ LANGUAGE sql STRICT; /* * Get number of partitions managed by pg_pathman. */ -CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( +CREATE FUNCTION @extschema@.get_number_of_partitions( parent_relid REGCLASS) RETURNS INT4 AS $$ - SELECT count(*)::INT4 + SELECT pg_catalog.count(*)::INT4 FROM pg_catalog.pg_inherits WHERE inhparent = parent_relid; $$ @@ -716,7 +716,7 @@ LANGUAGE sql STRICT; /* * Get parent of pg_pathman's partition. */ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( +CREATE FUNCTION @extschema@.get_parent_of_partition( partition_relid REGCLASS) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; @@ -724,7 +724,7 @@ LANGUAGE C STRICT; /* * Extract basic type of a domain. 
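 *
 * Usage sketch (the domain exists purely for illustration):
 *   CREATE DOMAIN percentage AS NUMERIC CHECK (VALUE BETWEEN 0 AND 100);
 *   SELECT get_base_type('percentage'::REGTYPE);  -- numeric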
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_base_type( +CREATE FUNCTION @extschema@.get_base_type( typid REGTYPE) RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' LANGUAGE C STRICT; @@ -732,7 +732,7 @@ LANGUAGE C STRICT; /* * Return tablespace name for specified relation. */ -CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( +CREATE FUNCTION @extschema@.get_tablespace( relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' LANGUAGE C STRICT; @@ -741,7 +741,7 @@ LANGUAGE C STRICT; /* * Check that relation exists. */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( +CREATE FUNCTION @extschema@.validate_relname( relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'validate_relname' LANGUAGE C; @@ -749,7 +749,7 @@ LANGUAGE C; /* * Check that expression is valid */ -CREATE OR REPLACE FUNCTION @extschema@.validate_expression( +CREATE FUNCTION @extschema@.validate_expression( relid REGCLASS, expression TEXT) RETURNS VOID AS 'pg_pathman', 'validate_expression' @@ -758,7 +758,7 @@ LANGUAGE C; /* * Check if regclass is date or timestamp. */ -CREATE OR REPLACE FUNCTION @extschema@.is_date_type( +CREATE FUNCTION @extschema@.is_date_type( typid REGTYPE) RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' LANGUAGE C STRICT; @@ -766,7 +766,7 @@ LANGUAGE C STRICT; /* * Check if TYPE supports the specified operator. */ -CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( +CREATE FUNCTION @extschema@.is_operator_supported( type_oid REGTYPE, opname TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' @@ -775,7 +775,7 @@ LANGUAGE C STRICT; /* * Check if tuple from first relation can be converted to fit the second one. */ -CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( +CREATE FUNCTION @extschema@.is_tuple_convertible( relation1 REGCLASS, relation2 REGCLASS) RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' @@ -785,7 +785,7 @@ LANGUAGE C STRICT; /* * Build check constraint name for a specified relation's column. */ -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( +CREATE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name' LANGUAGE C STRICT; @@ -793,7 +793,7 @@ LANGUAGE C STRICT; /* * Add record to pathman_config (RANGE) and validate partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( +CREATE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, expression TEXT, range_interval TEXT) @@ -803,7 +803,7 @@ LANGUAGE C; /* * Add record to pathman_config (HASH) and validate partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( +CREATE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, expression TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' @@ -814,7 +814,7 @@ LANGUAGE C; * Lock partitioned relation to restrict concurrent * modification of partitioning scheme. */ -CREATE OR REPLACE FUNCTION @extschema@.prevent_part_modification( +CREATE FUNCTION @extschema@.prevent_part_modification( parent_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'prevent_part_modification' LANGUAGE C STRICT; @@ -822,7 +822,7 @@ LANGUAGE C STRICT; /* * Lock relation to restrict concurrent modification of data. 
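 *
 * Usage sketch (hypothetical table 'big_log'): the lock lasts until end of
 * transaction, so it is only meaningful inside an explicit block:
 *   BEGIN;
 *   SELECT prevent_data_modification('big_log');
 *   -- ... migrate or repartition rows safely ...
 *   COMMIT;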
*/ -CREATE OR REPLACE FUNCTION @extschema@.prevent_data_modification( +CREATE FUNCTION @extschema@.prevent_data_modification( parent_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'prevent_data_modification' LANGUAGE C STRICT; @@ -831,7 +831,7 @@ LANGUAGE C STRICT; /* * Invoke init_callback on RANGE partition. */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, partition_relid REGCLASS, init_callback REGPROCEDURE, @@ -843,7 +843,7 @@ LANGUAGE C; /* * Invoke init_callback on HASH partition. */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, partition_relid REGCLASS, init_callback REGPROCEDURE) @@ -853,10 +853,10 @@ LANGUAGE C; /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. */ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() +CREATE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.pathman_version() +CREATE FUNCTION @extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index ef439cee..5af17014 100644 --- a/range.sql +++ b/range.sql @@ -3,7 +3,7 @@ * range.sql * RANGE partitioning functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,7 +11,7 @@ /* * Check RANGE partition boundaries. */ -CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( +CREATE FUNCTION @extschema@.check_boundaries( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -24,7 +24,7 @@ DECLARE BEGIN /* Get min and max values */ - EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + EXECUTE pg_catalog.format('SELECT count(*), min(%1$s), max(%1$s) FROM %2$s WHERE NOT %1$s IS NULL', expression, parent_relid::TEXT) INTO rows_count, min_value, max_value; @@ -49,7 +49,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on datetime attribute */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -76,7 +76,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; IF rows_count = 0 THEN @@ -142,7 +142,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on numerical expression */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -169,7 +169,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; IF rows_count = 0 THEN @@ -239,7 +239,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on bounds array */ -CREATE OR REPLACE FUNCTION 
@extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, bounds ANYARRAY, @@ -297,7 +297,7 @@ LANGUAGE plpgsql; /* * Append new partition. */ -CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( +CREATE FUNCTION @extschema@.append_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) @@ -326,7 +326,7 @@ BEGIN INTO part_interval; EXECUTE - format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + pg_catalog.format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, @@ -347,7 +347,7 @@ $$ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. */ -CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( +CREATE FUNCTION @extschema@.append_partition_internal( parent_relid REGCLASS, p_atttype REGTYPE, p_interval TEXT, @@ -368,7 +368,7 @@ BEGIN part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -378,13 +378,13 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); END IF; EXECUTE - format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) USING parent_relid, p_range[2], @@ -401,7 +401,7 @@ $$ LANGUAGE plpgsql; /* * Prepend new partition. */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( +CREATE FUNCTION @extschema@.prepend_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) @@ -430,7 +430,7 @@ BEGIN INTO part_interval; EXECUTE - format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + pg_catalog.format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, @@ -451,7 +451,7 @@ $$ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. 
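 *
 * Callers normally reach this through the public wrapper defined above,
 * e.g. (hypothetical table 'big_log'):
 *   SELECT prepend_range_partition('big_log');
 * which allocates a new partition in front of the lowest existing range.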
*/ -CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( +CREATE FUNCTION @extschema@.prepend_partition_internal( parent_relid REGCLASS, p_atttype REGTYPE, p_interval TEXT, @@ -472,7 +472,7 @@ BEGIN part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -482,13 +482,13 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); END IF; EXECUTE - format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) USING parent_relid, p_range[1], @@ -505,7 +505,7 @@ $$ LANGUAGE plpgsql; /* * Add new partition */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( +CREATE FUNCTION @extschema@.add_range_partition( parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, @@ -547,7 +547,7 @@ $$ LANGUAGE plpgsql; /* * Drop range partition */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( +CREATE FUNCTION @extschema@.drop_range_partition( partition_relid REGCLASS, delete_data BOOLEAN DEFAULT TRUE) RETURNS TEXT AS $$ @@ -576,7 +576,7 @@ BEGIN PERFORM @extschema@.prevent_part_modification(parent_relid); IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, partition_relid::TEXT); GET DIAGNOSTICS v_rows = ROW_COUNT; @@ -595,9 +595,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. 
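 *
 * (Note: when delete_data is FALSE, the rows were already moved back into
 * the parent by the INSERT ... SELECT above, so dropping the child here
 * loses no data.)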
*/ IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', partition_relid::TEXT); ELSE - EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + EXECUTE pg_catalog.format('DROP TABLE %s', partition_relid::TEXT); END IF; RETURN part_name; @@ -608,7 +608,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is /* * Attach range partition */ -CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( +CREATE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, partition_relid REGCLASS, start_value ANYELEMENT, @@ -658,10 +658,10 @@ BEGIN END IF; /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); /* Set check constraint */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid), @extschema@.build_range_condition(partition_relid, @@ -691,7 +691,7 @@ $$ LANGUAGE plpgsql; /* * Detach range partition */ -CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( +CREATE FUNCTION @extschema@.detach_range_partition( partition_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -718,12 +718,12 @@ BEGIN END IF; /* Remove inheritance */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s NO INHERIT %s', partition_relid::TEXT, parent_relid::TEXT); /* Remove check constraint */ - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s DROP CONSTRAINT %s', partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid)); @@ -735,7 +735,7 @@ $$ LANGUAGE plpgsql; /* * Create a naming sequence for partitioned table. */ -CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( +CREATE FUNCTION @extschema@.create_naming_sequence( parent_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -744,8 +744,8 @@ DECLARE BEGIN seq_name := @extschema@.build_sequence_name(parent_relid); - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE pg_catalog.format('CREATE SEQUENCE %s START 1', seq_name); RETURN seq_name; END @@ -755,7 +755,7 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ /* * Drop a naming sequence for partitioned table. */ -CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( +CREATE FUNCTION @extschema@.drop_naming_sequence( parent_relid REGCLASS) RETURNS VOID AS $$ DECLARE @@ -764,7 +764,7 @@ DECLARE BEGIN seq_name := @extschema@.build_sequence_name(parent_relid); - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); END $$ LANGUAGE plpgsql SET client_min_messages = WARNING; /* mute NOTICE message */ @@ -773,7 +773,7 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ /* * Split RANGE partition in two using a pivot. */ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( +CREATE FUNCTION @extschema@.split_range_partition( partition_relid REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, @@ -784,7 +784,7 @@ LANGUAGE C; /* * Merge RANGE partitions. 
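 *
 * Usage sketch (hypothetical adjacent partitions of one parent):
 *   SELECT merge_range_partitions('big_log_1', 'big_log_2');
 * The variadic form accepts any number of partitions to merge into one.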
*/ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( +CREATE FUNCTION @extschema@.merge_range_partitions( variadic partitions REGCLASS[]) RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' LANGUAGE C STRICT; @@ -796,12 +796,12 @@ LANGUAGE C STRICT; * DROP PARTITION. In Oracle partitions only have upper bound and when * partition is dropped the next one automatically covers freed range */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( +CREATE FUNCTION @extschema@.drop_range_partition_expand_next( partition_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( +CREATE FUNCTION @extschema@.create_range_partitions_internal( parent_relid REGCLASS, bounds ANYARRAY, partition_names TEXT[], @@ -813,7 +813,7 @@ LANGUAGE C; * Creates new RANGE partition. Returns partition name. * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( +CREATE FUNCTION @extschema@.create_single_range_partition( parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, @@ -825,7 +825,7 @@ LANGUAGE C; /* * Construct CHECK constraint condition for a range partition. */ -CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( +CREATE FUNCTION @extschema@.build_range_condition( partition_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -836,7 +836,7 @@ LANGUAGE C; /* * Generate a name for naming sequence. */ -CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( +CREATE FUNCTION @extschema@.build_sequence_name( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' LANGUAGE C STRICT; @@ -844,7 +844,7 @@ LANGUAGE C STRICT; /* * Returns N-th range (as an array of two elements). */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( +CREATE FUNCTION @extschema@.get_part_range( parent_relid REGCLASS, partition_idx INTEGER, dummy ANYELEMENT) @@ -854,7 +854,7 @@ LANGUAGE C; /* * Returns min and max values for specified RANGE partition. */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( +CREATE FUNCTION @extschema@.get_part_range( partition_relid REGCLASS, dummy ANYELEMENT) RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' @@ -864,7 +864,7 @@ LANGUAGE C; * Checks if range overlaps with existing partitions. * Returns TRUE if overlaps and FALSE otherwise. */ -CREATE OR REPLACE FUNCTION @extschema@.check_range_available( +CREATE FUNCTION @extschema@.check_range_available( parent_relid REGCLASS, range_min ANYELEMENT, range_max ANYELEMENT) @@ -874,14 +874,14 @@ LANGUAGE C; /* * Generate range bounds starting with 'p_start' using 'p_interval'. */ -CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( +CREATE FUNCTION @extschema@.generate_range_bounds( p_start ANYELEMENT, p_interval INTERVAL, p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( +CREATE FUNCTION @extschema@.generate_range_bounds( p_start ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER) diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql new file mode 100644 index 00000000..877f3280 --- /dev/null +++ b/sql/pathman_CVE-2020-14350.sql @@ -0,0 +1,77 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. 
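+ *
+ * In brief, the attack works like this: an unprivileged role pre-creates a
+ * function whose signature matches a shorter, unqualified call to one of
+ * pg_pathman's helpers (the hostile bodies below run ALTER ROLE ...
+ * SUPERUSER), and overload resolution can then pick the hostile copy when
+ * a superuser-driven code path invokes the helper. The fix schema-qualifies
+ * the internal call sites and spells out every argument; a sketch of the
+ * now-pinned call shape (assuming the extension was created in schema
+ * 'public', as in this test):
+ *   SELECT public.create_single_range_partition(
+ *            'public.test2'::regclass, '0'::int4, '1'::int4,
+ *            'public.test2_1', NULL::text);
+ * The install scripts likewise switch from CREATE OR REPLACE FUNCTION to
+ * plain CREATE FUNCTION, so a pre-created hostile object now raises an
+ * error instead of being silently adopted.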
+ */ + +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS regress_hacker; +SET client_min_messages = 'notice'; + +CREATE EXTENSION pg_pathman; +CREATE ROLE regress_hacker LOGIN; + +-- Test 1 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; + +SET ROLE regress_hacker; +SHOW is_superuser; +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; + +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); + +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +SELECT pg_sleep(1); + +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + +-- Test 2 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; + +SET ROLE regress_hacker; +SHOW is_superuser; +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; + +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); +INSERT INTO test2 values(1); + +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + +-- Cleanup +RESET ROLE; +DROP FUNCTION _partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE test1 CASCADE; +DROP TABLE test2 CASCADE; +DROP ROLE regress_hacker; +DROP EXTENSION pg_pathman; + diff --git a/src/partition_creation.c b/src/partition_creation.c index cd2a7b82..c86ba7aa 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -604,7 +604,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* Construct call to create_single_range_partition() */ create_sql = psprintf( - "select %s.create_single_range_partition('%s.%s', '%s'::%s, '%s'::%s, '%s.%s')", + "select %s.create_single_range_partition('%s.%s'::regclass, '%s'::%s, '%s'::%s, '%s.%s', NULL::text)", quote_identifier(get_namespace_name(get_pathman_schema())), quote_identifier(parent_nsp_name), quote_identifier(get_rel_name(parent_relid)), diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 54d62e7f..a75e912b 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -523,7 +523,7 @@ bgw_main_concurrent_part(Datum main_arg) * context will be destroyed after transaction finishes */ current_mcxt = MemoryContextSwitchTo(TopPathmanContext); - sql = psprintf("SELECT %s._partition_data_concurrent($1::oid, p_limit:=$2)", + sql = psprintf("SELECT %s._partition_data_concurrent($1::regclass, NULL::text, NULL::text, p_limit:=$2)", get_namespace_name(get_pathman_schema())); MemoryContextSwitchTo(current_mcxt); } From 1e82fd397d7acf14d4f7791adb1eefa7f8aaa06e Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 8 Nov 2020 17:17:21 +0300 Subject: [PATCH 447/528] Bump 1.5.12 lib version. 
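The new number is visible both from SQL and from the C library define, as the
hunks below assert. A quick post-upgrade sanity check (sketch; the output
matches the expected files of the calamity tests):

    SELECT pathman_version();  -- 1.5.12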
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- expected/pathman_calamity_1.out | 2 +- expected/pathman_calamity_2.out | 2 +- src/include/init.h | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/META.json b/META.json index 6bd1607d..c32d74ba 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.11", + "version": "1.5.12", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.11", + "version": "1.5.12", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } },
diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 50bfd803..7e794a72 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -23,7 +23,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.11 + 1.5.12 (1 row) set client_min_messages = NOTICE;
diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 20c2ea6c..60313bfd 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -23,7 +23,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.11 + 1.5.12 (1 row) set client_min_messages = NOTICE;
diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index 0c7757a9..e621831b 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -23,7 +23,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.11 + 1.5.12 (1 row) set client_min_messages = NOTICE;
diff --git a/src/include/init.h b/src/include/init.h index f7f3df59..f2234c8f 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -160,7 +160,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.11" +#define CURRENT_LIB_VERSION "1.5.12" void *pathman_cache_search_relid(HTAB *cache_table,
From d31988d910ad84b9573d9b964f8f6b73b93adb0f Mon Sep 17 00:00:00 2001 From: Victor Wagner Date: Wed, 11 Nov 2020 16:12:24 +0300 Subject: [PATCH 448/528] Silence compiler warnings found on buildfarm --- src/init.c | 2 +- src/partition_filter.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/init.c b/src/init.c index 86e96ebe..99b79f55 100644 --- a/src/init.c +++ b/src/init.c @@ -930,7 +930,7 @@ read_opexpr_const(const OpExpr *opexpr, /* Update RIGHT */ right = (Node *) constant; } - /* FALL THROUGH (no break) */ + /* FALLTHROUGH */ case T_Const: {
diff --git a/src/partition_filter.c b/src/partition_filter.c index 3808dc26..b8b3b03c 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1016,6 +1016,7 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, elog(ERROR, "FDWs other than postgres_fdw are restricted"); + break; case PF_FDW_INSERT_ANY_FDW: elog(WARNING, "unrestricted FDW mode may lead to crashes");
From c25ba927ede826a229a533d184d78f73468da7cd Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 10 Dec 2020 19:06:58 +0300 Subject: [PATCH 449/528] pathman_dropped_cols test fixed --- expected/pathman_dropped_cols.out | 20 ++++++++++---------- sql/pathman_dropped_cols.sql | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff
--git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 79e781b2..220f6750 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -183,22 +183,22 @@ EXECUTE getbyroot(2); 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) -EXPLAIN EXECUTE getbyroot(2); - QUERY PLAN --------------------------------------------------------------------------------------------- - Custom Scan (RuntimeAppend) (cost=4.17..11.28 rows=3 width=128) +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); + QUERY PLAN +---------------------------------------------------------- + Custom Scan (RuntimeAppend) Prune by: (root_dict.root_id = $1) - -> Bitmap Heap Scan on root_dict_0 root_dict (cost=4.17..11.28 rows=3 width=128) + -> Bitmap Heap Scan on root_dict_0 root_dict Recheck Cond: (root_id = $1) - -> Bitmap Index Scan on root_dict_0_root_id_idx (cost=0.00..4.17 rows=3 width=0) + -> Bitmap Index Scan on root_dict_0_root_id_idx Index Cond: (root_id = $1) - -> Bitmap Heap Scan on root_dict_1 root_dict (cost=4.17..11.28 rows=3 width=128) + -> Bitmap Heap Scan on root_dict_1 root_dict Recheck Cond: (root_id = $1) - -> Bitmap Index Scan on root_dict_1_root_id_idx (cost=0.00..4.17 rows=3 width=0) + -> Bitmap Index Scan on root_dict_1_root_id_idx Index Cond: (root_id = $1) - -> Bitmap Heap Scan on root_dict_2 root_dict (cost=4.17..11.28 rows=3 width=128) + -> Bitmap Heap Scan on root_dict_2 root_dict Recheck Cond: (root_id = $1) - -> Bitmap Index Scan on root_dict_2_root_id_idx (cost=0.00..4.17 rows=3 width=0) + -> Bitmap Index Scan on root_dict_2_root_id_idx Index Cond: (root_id = $1) (14 rows)
diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index 0ae16c8a..cb6acc57 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -96,7 +96,7 @@ EXECUTE getbyroot(2); -- errors usually start here EXECUTE getbyroot(2); EXECUTE getbyroot(2); -EXPLAIN EXECUTE getbyroot(2); +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE;
From 6b484c2ca9d071037be83c4f3c32df3348bf867d Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 15 Dec 2020 15:57:55 +0300 Subject: [PATCH 450/528] Remove queries from calamity test which depend on num of entries in relcache. Autovacuum blows out the relcache, so the test occasionally fails if autovacuum is aggressive enough.
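The probe that remains in the test filters the volatile cache out; this is
the query shape now used throughout the calamity expected files:

    SELECT context, entries FROM pathman_cache_stats
      WHERE context != 'partition status cache' ORDER BY context;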
--- expected/pathman_calamity.out | 54 ++++++++++++++++----------------- expected/pathman_calamity_1.out | 54 ++++++++++++++++----------------- expected/pathman_calamity_2.out | 54 ++++++++++++++++----------------- sql/pathman_calamity.sql | 27 +++++++++++------ 4 files changed, 99 insertions(+), 90 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7e794a72..d8b6ad96 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -816,25 +816,25 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -862,25 +862,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -908,25 +908,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 
-(4 rows) +(3 rows) /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); @@ -952,14 +952,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); drop_range_partition @@ -967,25 +967,25 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); calamity.test_pathman_cache_stats_1 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 9 partition parents cache | 9 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 60313bfd..2b0f98e5 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -816,25 +816,25 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -862,25 +862,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP 
TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -908,25 +908,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); @@ -952,14 +952,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); drop_range_partition @@ -967,25 +967,25 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); calamity.test_pathman_cache_stats_1 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 9 partition parents cache | 9 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index e621831b..b6fafc83 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -816,25 +816,25 @@ SELECT 
create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -862,25 +862,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -908,25 +908,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); @@ -952,14 +952,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | 
entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); drop_range_partition @@ -967,25 +967,25 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); calamity.test_pathman_cache_stats_1 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 9 partition parents cache | 9 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index b49d061c..6ad0df0e 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -383,9 +383,11 @@ CREATE EXTENSION pg_pathman; /* check that cache loading is lazy */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -394,9 +396,11 @@ SET pg_pathman.enable_bounds_cache = false; CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -405,19 +409,24 @@ SET pg_pathman.enable_bounds_cache = true; CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE 
calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; From 2062ab9538b94f2f23e14cf2ba4d9cdac2b07601 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 28 Jun 2021 15:21:49 +0300 Subject: [PATCH 451/528] [PGPRO-5255] fix that ALTER TABLE IF EXISTS ... RENAME TO of not existed table generate ERROR instead of NOTICE --- expected/pathman_utility_stmt.out | 7 +++++++ sql/pathman_utility_stmt.sql | 6 ++++++ src/utility_stmt_hooking.c | 5 ++++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 4cc4d493..0001b2f0 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -370,4 +370,11 @@ SELECT create_hash_partitions('drop_index.test', 'val', 2); DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; NOTICE: drop cascades to 3 other objects +/* + * Test, that ALTER TABLE IF EXISTS ... RENAME TO of not existed table generate NOTICE instead of ERROR + */ +CREATE SCHEMA rename_nonexistent; +ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; +NOTICE: relation "nonexistent_table" does not exist, skipping +DROP SCHEMA rename_nonexistent CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 31232ce1..c5e940ce 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -250,6 +250,12 @@ DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; +/* + * Test, that ALTER TABLE IF EXISTS ... 
RENAME TO of not existed table generate NOTICE instead of ERROR + */ +CREATE SCHEMA rename_nonexistent; +ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; +DROP SCHEMA rename_nonexistent CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index c9ffbf14..8b160f64 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -175,7 +175,10 @@ is_pathman_related_table_rename(Node *parsetree, /* Fetch Oid of this relation */ relation_oid = RangeVarGetRelid(rename_stmt->relation, AccessShareLock, - false); + rename_stmt->missing_ok); + /* PGPRO-5255: check ALTER TABLE IF EXISTS of non existent table */ + if (rename_stmt->missing_ok && relation_oid == InvalidOid) + return false; /* Assume it's a parent */ if (has_pathman_relation_info(relation_oid)) From 363efa969a473680f54314df0af0313c9d9dda8b Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 28 Jun 2021 18:34:11 +0300 Subject: [PATCH 452/528] [PGPRO-5255] corrections based on the review --- expected/pathman_declarative.out | 4 ++ expected/pathman_declarative_1.out | 4 ++ expected/pathman_utility_stmt.out | 67 ++++++++++++++++++++++++++++-- sql/pathman_declarative.sql | 3 ++ sql/pathman_utility_stmt.sql | 49 ++++++++++++++++++++-- src/declarative.c | 6 ++- src/utility_stmt_hooking.c | 9 +++- 7 files changed, 131 insertions(+), 11 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index 011a0f71..c13c0010 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -94,6 +94,10 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping DROP SCHEMA test CASCADE; NOTICE: drop cascades to 8 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out index 8ef4e556..d720d335 100644 --- a/expected/pathman_declarative_1.out +++ b/expected/pathman_declarative_1.out @@ -94,6 +94,10 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping DROP SCHEMA test CASCADE; NOTICE: drop cascades to 8 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 0001b2f0..6e137b37 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -371,10 +371,69 @@ DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; NOTICE: drop cascades to 3 other objects /* - * Test, that ALTER TABLE IF EXISTS ... 
RENAME TO of not existed table generate NOTICE instead of ERROR + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla */ -CREATE SCHEMA rename_nonexistent; -ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; +CREATE SCHEMA test_nonexistance; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; NOTICE: relation "nonexistent_table" does not exist, skipping -DROP SCHEMA rename_nonexistent CASCADE; +/* renaming existent tables already tested earlier (see rename.plain_test) */ +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; +NOTICE: column "i" of relation "existent_table" already exists, skipping +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + i + j +(2 rows) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +NOTICE: column "nonexistent_column" of relation "existent_table" does not exist, skipping +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +------------------------------ + ........pg.dropped.1........ 
+(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + j +(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET SCHEMA nonexistent_schema; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +ERROR: schema "nonexistent_schema" does not exist +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2 CASCADE; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +ERROR: tablespace "nonexistent_tablespace" does not exist +DROP TABLE test_nonexistance.existent_table; +DROP SCHEMA test_nonexistance CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 864e3af8..347627a7 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -39,6 +39,9 @@ CREATE TABLE test.r4 PARTITION OF test.range_rel FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); \d+ test.r4; +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index c5e940ce..c0832f34 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -251,11 +251,52 @@ DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; /* - * Test, that ALTER TABLE IF EXISTS ... 
RENAME TO of not existed table generate NOTICE instead of ERROR + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla */ -CREATE SCHEMA rename_nonexistent; -ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; -DROP SCHEMA rename_nonexistent CASCADE; +CREATE SCHEMA test_nonexistance; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; +/* renaming existent tables already tested earlier (see rename.plain_test) */ + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +CREATE TABLE test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET SCHEMA nonexistent_schema; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2 CASCADE; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +DROP TABLE test_nonexistance.existent_table; + +DROP SCHEMA test_nonexistance CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/declarative.c b/src/declarative.c index ca4fe165..367df752 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -75,7 +75,11 @@ is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) AlterTableStmt *stmt = (AlterTableStmt *) parsetree; int cnt = 0; - *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, stmt->missing_ok); + + if (stmt->missing_ok && *parent_relid == 
InvalidOid) + return false; + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) return false; diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 8b160f64..1949d970 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -176,7 +176,8 @@ is_pathman_related_table_rename(Node *parsetree, relation_oid = RangeVarGetRelid(rename_stmt->relation, AccessShareLock, rename_stmt->missing_ok); - /* PGPRO-5255: check ALTER TABLE IF EXISTS of non existent table */ + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ if (rename_stmt->missing_ok && relation_oid == InvalidOid) return false; @@ -235,7 +236,11 @@ is_pathman_related_alter_column_type(Node *parsetree, /* Assume it's a parent, fetch its Oid */ parent_relid = RangeVarGetRelid(alter_table_stmt->relation, AccessShareLock, - false); + alter_table_stmt->missing_ok); + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ + if (alter_table_stmt->missing_ok && parent_relid == InvalidOid) + return false; /* Is parent partitioned? */ if ((prel = get_pathman_relation_info(parent_relid)) != NULL) From 98b1f181b442e38d2e28d463c70c66511e7b8736 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 29 Jun 2021 05:09:58 +0300 Subject: [PATCH 453/528] fix travis-ci build, remove deprecated options from yaml, move to travis-ci.com (from .org) --- .travis.yml | 6 ++++-- Dockerfile.tmpl | 4 ++-- README.md | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index b020780b..7f22cf8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,6 @@ -sudo: required +os: linux + +dist: focal language: c @@ -31,7 +33,7 @@ env: - PG_VERSION=9.5 LEVEL=hardcore - PG_VERSION=9.5 -matrix: +jobs: allow_failures: - env: PG_VERSION=10 LEVEL=nightmare - env: PG_VERSION=9.6 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 85b159cf..e1e3b0e6 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -29,8 +29,8 @@ ADD . /pg/testdir # Grant privileges RUN chown -R postgres:postgres ${PGDATA} && \ chown -R postgres:postgres /pg/testdir && \ - chmod a+rwx /usr/local/lib/postgresql && \ - chmod a+rwx /usr/local/share/postgresql/extension + chmod a+rwx /usr/local/share/postgresql/extension && \ + find /usr/local/lib/postgresql -type d -print0 | xargs -0 chmod a+rwx COPY run_tests.sh /run.sh RUN chmod 755 /run.sh diff --git a/README.md b/README.md index 94133b32..d4b8e3bb 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Build Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) +[![Build Status](https://travis-ci.com/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.com/postgrespro/pg_pathman) [![PGXN version](https://badge.fury.io/pg/pg_pathman.svg)](https://badge.fury.io/pg/pg_pathman) [![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) [![GitHub license](https://img.shields.io/badge/license-PostgreSQL-blue.svg)](https://raw.githubusercontent.com/postgrespro/pg_pathman/master/LICENSE) From 2e174bad0beb4835d2ab18675e32f3b81a7d7c4a Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Tue, 29 Jun 2021 14:54:14 +0300 Subject: [PATCH 454/528] [PGPRO-5255] fix tests for postgres 9.5 and 10 --- expected/pathman_declarative.out | 3 ++- expected/pathman_declarative_1.out | 3 ++- expected/pathman_utility_stmt.out | 6 ++---- sql/pathman_declarative.sql | 3 ++- sql/pathman_utility_stmt.sql | 5 ++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index c13c0010..01f924ae 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -94,7 +94,8 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel -ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +/* Note: PG-10 doesn't support ATTACH PARTITION ... DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out index d720d335..9870a3e7 100644 --- a/expected/pathman_declarative_1.out +++ b/expected/pathman_declarative_1.out @@ -94,7 +94,8 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel -ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +/* Note: PG-10 doesn't support ATTACH PARTITION ... DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 6e137b37..7e59fa23 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -377,12 +377,10 @@ CREATE SCHEMA test_nonexistance; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; NOTICE: relation "nonexistent_table" does not exist, skipping /* renaming existent tables already tested earlier (see rename.plain_test) */ -ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; NOTICE: relation "nonexistent_table" does not exist, skipping CREATE TABLE test_nonexistance.existent_table(i INT4); -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; -NOTICE: column "i" of relation "existent_table" already exists, skipping -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; attname --------- diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 347627a7..d89ce3ed 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -39,7 +39,8 @@ CREATE TABLE test.r4 PARTITION OF test.range_rel FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); \d+ test.r4; -ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +/* Note: PG-10 doesn't support ATTACH PARTITION ... 
DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; DROP SCHEMA test CASCADE; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index c0832f34..3b99a2f3 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -258,10 +258,9 @@ CREATE SCHEMA test_nonexistance; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; /* renaming existent tables already tested earlier (see rename.plain_test) */ -ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; CREATE TABLE test_nonexistance.existent_table(i INT4); -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; DROP TABLE test_nonexistance.existent_table; From 4b0252aaa1ebe1fc035ca7a48f606b2af896bb89 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 5 Jul 2021 19:26:27 +0300 Subject: [PATCH 455/528] [PGPRO-5306] more correct checking of b-tree search strategies --- src/pg_pathman.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index e3a46abd..f06e794e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1159,8 +1159,14 @@ handle_array(ArrayType *array, bool elem_byval; char elem_align; - /* Check if we can work with this strategy */ - if (strategy == 0) + /* + * Check if we can work with this strategy + * We can work only with BTLessStrategyNumber, BTLessEqualStrategyNumber, + * BTEqualStrategyNumber, BTGreaterEqualStrategyNumber and BTGreaterStrategyNumber. + * If new search strategies appear in the future, then access optimizations from + * this function will not work, and the default behavior (handle_array_return:) will work. 
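The strategy guard introduced just below works because the btree strategy numbers defined in access/stratnum.h form the dense range 1..5 (BTLessStrategyNumber through BTGreaterStrategyNumber), with InvalidStrategy defined as 0. A minimal sketch of the same test, using only constants from the PostgreSQL headers (the helper name itself is hypothetical):

#include "postgres.h"
#include "access/stratnum.h"

/*
 * Hypothetical helper restating the guard from handle_array(): the btree
 * strategies for <, <=, =, >= and > are numbered 1..5, and InvalidStrategy
 * is 0, so a single range test accepts exactly the supported operators.
 */
static bool
is_supported_btree_strategy(StrategyNumber strategy)
{
    return strategy != InvalidStrategy &&
           strategy <= BTGreaterStrategyNumber;
}

Any strategy outside that range falls through to the catch-all handling at the handle_array_return label, as the comment above explains.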
+ */ + if (strategy == InvalidStrategy || strategy > BTGreaterStrategyNumber) goto handle_array_return; /* Get element's properties */ @@ -1177,8 +1183,12 @@ handle_array(ArrayType *array, List *ranges; int i; - /* This is only for paranoia's sake */ - Assert(BTMaxStrategyNumber == 5 && BTEqualStrategyNumber == 3); + /* This is only for paranoia's sake (checking correctness of following take_min calculation) */ + Assert(BTEqualStrategyNumber == 3 + && BTLessStrategyNumber < BTEqualStrategyNumber + && BTLessEqualStrategyNumber < BTEqualStrategyNumber + && BTGreaterEqualStrategyNumber > BTEqualStrategyNumber + && BTGreaterStrategyNumber > BTEqualStrategyNumber); /* Optimizations for <, <=, >=, > */ if (strategy != BTEqualStrategyNumber) From 4870f5cc5d2d40d7019d8b47e709241c8ef28252 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Oct 2021 16:42:41 +0300 Subject: [PATCH 456/528] Changes for gcc-11 compilation --- src/pathman_workers.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index a75e912b..1bfda8f1 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -57,8 +57,8 @@ extern PGDLLEXPORT void bgw_main_concurrent_part(Datum main_arg); static void handle_sigterm(SIGNAL_ARGS); static void bg_worker_load_config(const char *bgw_name); -static bool start_bgworker(const char bgworker_name[BGW_MAXLEN], - const char bgworker_proc[BGW_MAXLEN], +static bool start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown); @@ -166,8 +166,8 @@ bg_worker_load_config(const char *bgw_name) * Common function to start background worker. */ static bool -start_bgworker(const char bgworker_name[BGW_MAXLEN], - const char bgworker_proc[BGW_MAXLEN], +start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown) { #define HandleError(condition, new_state) \ From cffbe81c227ad62111a19bd82f2b84d405a2c8e6 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Oct 2021 14:43:31 +0300 Subject: [PATCH 457/528] Change for online upgrade --- src/pathman_workers.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 1bfda8f1..7b37d7ba 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -195,6 +195,9 @@ start_bgworker(const char *bgworker_name, snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | +#if defined(PGPRO_EE) && PG_VERSION_NUM == 130000 /* FIXME: need to replace "==" to ">=" in future */ + BGWORKER_CLASS_PERSISTENT | +#endif BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; From ca078423cbf8299ad71b1ca98ff7cc6e5c74222f Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Oct 2021 17:43:12 +0300 Subject: [PATCH 458/528] Fixed PG_VERSION_NUM condition --- src/pathman_workers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 7b37d7ba..38d61622 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -195,7 +195,7 @@ start_bgworker(const char *bgworker_name, snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | -#if defined(PGPRO_EE) && PG_VERSION_NUM == 130000 /* FIXME: need to replace "==" to ">=" in future */ +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 && PG_VERSION_NUM < 140000 /* 
FIXME: need to remove last condition in future */ BGWORKER_CLASS_PERSISTENT | #endif BGWORKER_BACKEND_DATABASE_CONNECTION; From 7df6cdfb582c5f752304c7cb49e0e54ee51af055 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 21 Oct 2021 09:20:00 +0300 Subject: [PATCH 459/528] PostgreSQL v14 compatibility Porting to v14 has difficulties because the internal API has changed seriously. So this porting requires some changes in the source PostgreSQL codes and can not be applied to vanilla without patch. --- expected/pathman_calamity_3.out | 1068 ++++++++++++++++++++ expected/pathman_cte_2.out | 252 +++++ expected/pathman_join_clause_2.out | 155 +++ expected/pathman_subpartitions.out | 3 +- expected/pathman_subpartitions_1.out | 3 +- expected/pathman_views_3.out | 189 ++++ patches/REL_14_STABLE-pg_pathman-core.diff | 533 ++++++++++ sql/pathman_subpartitions.sql | 3 +- src/compat/pg_compat.c | 8 +- src/hooks.c | 41 +- src/include/compat/pg_compat.h | 89 +- src/include/hooks.h | 17 +- src/include/partition_router.h | 7 +- src/nodes_common.c | 36 + src/partition_filter.c | 99 +- src/partition_overseer.c | 42 +- src/partition_router.c | 104 +- src/planner_tree_modification.c | 53 + src/relation_info.c | 4 +- src/utility_stmt_hooking.c | 52 +- 20 files changed, 2706 insertions(+), 52 deletions(-) create mode 100644 expected/pathman_calamity_3.out create mode 100644 expected/pathman_cte_2.out create mode 100644 expected/pathman_join_clause_2.out create mode 100644 expected/pathman_views_3.out create mode 100644 patches/REL_14_STABLE-pg_pathman-core.diff diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out new file mode 100644 index 00000000..9aec9765 --- /dev/null +++ b/expected/pathman_calamity_3.out @@ -0,0 +1,1068 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
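Taken together, the background-worker fixes in patches 456-458 above reduce to one registration pattern: pointer-typed name parameters (a parameter declared as const char name[BGW_MAXLEN] decays to a plain pointer anyway, and gcc 11 warns when callers pass shorter string literals) plus a version-gated worker-class flag. A condensed sketch of that pattern follows; the library and entry-point names come from the hunks above, while the wrapper function itself is hypothetical:

#include "postgres.h"
#include "miscadmin.h"
#include "postmaster/bgworker.h"

/* Hypothetical wrapper showing the registration pattern after patches 456-458 */
static void
register_pathman_worker(const char *bgworker_name, Datum bgw_arg)
{
    BackgroundWorker worker;

    memset(&worker, 0, sizeof(worker));
    snprintf(worker.bgw_name, BGW_MAXLEN, "%s", bgworker_name);
    snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman");
    snprintf(worker.bgw_function_name, BGW_MAXLEN, "bgw_main_concurrent_part");

    worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 && PG_VERSION_NUM < 140000
                       BGWORKER_CLASS_PERSISTENT |  /* Postgres Pro EE only */
#endif
                       BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    worker.bgw_main_arg = bgw_arg;
    worker.bgw_notify_pid = MyProcPid;

    (void) RegisterDynamicBackgroundWorker(&worker, NULL);
}

BGWORKER_CLASS_PERSISTENT exists only in Postgres Pro Enterprise, which is why it stays behind the PGPRO_EE guard; on vanilla PostgreSQL the flag set is unchanged.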
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT 
create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 
10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); + build_hash_condition +---------------------------------------------------- + public.get_hash_part_idx(hash_record(val), 10) = 1 +(1 row) + +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT 
validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: relation "1" has no partitions +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: relation "pg_class" has no partitions +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + 
invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: relation "0" does not exist +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on 
part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* 
not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy 
*/ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context 
| entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, 
NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out new file mode 100644 index 00000000..455a7cad --- /dev/null +++ b/expected/pathman_cte_2.out @@ -0,0 +1,252 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + Delete on cte_del_xacts_2 t_3 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Seq Scan on cte_del_xacts_2 t_3 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(13 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition 
*/ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(11 rows) + +/* parent disabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 
5 | name5
+ 5 | name6
+ 5 | name7
+(3 rows)
+
+WITH RECURSIVE test AS (
+ SELECT min(name) AS name
+ FROM test_cte.recursive_cte_test_tbl
+ WHERE id = 5
+ UNION ALL
+ SELECT (SELECT min(name)
+ FROM test_cte.recursive_cte_test_tbl
+ WHERE id = 5 AND name > test.name)
+ FROM test
+ WHERE name IS NOT NULL)
+SELECT * FROM test;
+ name
+-------
+ name5
+ name6
+ name7
+
+(4 rows)
+
+DROP SCHEMA test_cte CASCADE;
+NOTICE: drop cascades to 3 other objects
+DROP EXTENSION pg_pathman;
diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out
new file mode 100644
index 00000000..d58ff6f6
--- /dev/null
+++ b/expected/pathman_join_clause_2.out
@@ -0,0 +1,155 @@
+/*
+ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
+ * causing different output; pathman_join_clause_2.out is the updated version.
+ */
+\set VERBOSITY terse
+SET search_path = 'public';
+CREATE SCHEMA pathman;
+CREATE EXTENSION pg_pathman SCHEMA pathman;
+CREATE SCHEMA test;
+/*
+ * Test push down a join clause into child nodes of append
+ */
+/* create test tables */
+CREATE TABLE test.fk (
+ id1 INT NOT NULL,
+ id2 INT NOT NULL,
+ start_key INT,
+ end_key INT,
+ PRIMARY KEY (id1, id2));
+CREATE TABLE test.mytbl (
+ id1 INT NOT NULL,
+ id2 INT NOT NULL,
+ key INT NOT NULL,
+ CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2),
+ PRIMARY KEY (id1, key));
+SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8);
+ create_hash_partitions
+------------------------
+ 8
+(1 row)
+
+/* ...fill out with test data */
+INSERT INTO test.fk VALUES (1, 1);
+INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6);
+/* gather statistics on test tables to have deterministic plans */
+ANALYZE;
+/* run test queries */
+EXPLAIN (COSTS OFF) /* test plan */
+SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key
+FROM test.mytbl m JOIN test.fk USING(id1, id2)
+WHERE NOT key <@ int4range(6, end_key);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Nested Loop
+ -> Seq Scan on fk
+ -> Custom Scan (RuntimeAppend)
+ Prune by: (fk.id1 = m.id1)
+ -> Seq Scan on mytbl_0 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+ -> Seq Scan on mytbl_1 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+ -> Seq Scan on mytbl_2 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+ -> Seq Scan on mytbl_3 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+ -> Seq Scan on mytbl_4 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+ -> Seq Scan on mytbl_5 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+ -> Seq Scan on mytbl_6 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+ -> Seq Scan on mytbl_7 m
+ Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key))))
+(20 rows)
+
+/* test joint data */
+SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key
+FROM test.mytbl m JOIN test.fk USING(id1, id2)
+WHERE NOT key <@ int4range(6, end_key);
+ tableoid | id1 | id2 | key | start_key | end_key
+--------------+-----+-----+-----+-----------+---------
+ test.mytbl_6 | 1 | 1 | 5 | |
+(1 row)
+
+/*
+ * Test case by @dimarick
+ */
+CREATE TABLE test.parent (
+ id SERIAL NOT NULL,
+ owner_id INTEGER NOT NULL
+);
+CREATE TABLE 
test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child_1.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index c13b4ee8..25b36492 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -417,7 +417,8 @@ SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; (4 rows) SET pg_pathman.enable_partitionrouter = ON; -UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; tableoid | id1 | id2 | val -----------------------+-----+-----+----- subpartitions.abc_3_4 | -1 | -1 | 1 diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out index f190f798..5ea33044 100644 --- a/expected/pathman_subpartitions_1.out +++ b/expected/pathman_subpartitions_1.out @@ -411,7 +411,8 @@ SELECT tableoid::regclass, * FROM 
subpartitions.abc ORDER BY id1, id2, val;
(4 rows)

SET pg_pathman.enable_partitionrouter = ON;
-UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *;
+WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *)
+SELECT * FROM updated ORDER BY val ASC;
tableoid | id1 | id2 | val
-----------------------+-----+-----+-----
subpartitions.abc_3_4 | -1 | -1 | 1
diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out
new file mode 100644
index 00000000..09b5718f
--- /dev/null
+++ b/expected/pathman_views_3.out
@@ -0,0 +1,189 @@
+/*
+ * -------------------------------------------
+ * NOTE: This test behaves differently on 9.5
+ * -------------------------------------------
+ *
+ * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
+ * causing different output; pathman_views_2.out is the updated version.
+ */
+\set VERBOSITY terse
+SET search_path = 'public';
+CREATE EXTENSION pg_pathman;
+CREATE SCHEMA views;
+/* create a partitioned table */
+create table views._abc(id int4 not null);
+select create_hash_partitions('views._abc', 'id', 10);
+ create_hash_partitions
+------------------------
+ 10
+(1 row)
+
+insert into views._abc select generate_series(1, 100);
+/* create a dummy table */
+create table views._abc_add (like views._abc);
+vacuum analyze;
+/* create a facade view */
+create view views.abc as select * from views._abc;
+create or replace function views.disable_modification()
+returns trigger as
+$$
+BEGIN
+ RAISE EXCEPTION '%', TG_OP;
+ RETURN NULL;
+END;
+$$
+language 'plpgsql';
+create trigger abc_mod_tr
+instead of insert or update or delete
+on views.abc for each row
+execute procedure views.disable_modification();
+/* Test SELECT */
+explain (costs off) select * from views.abc;
+ QUERY PLAN
+--------------------------
+ Append
+ -> Seq Scan on _abc_0
+ -> Seq Scan on _abc_1
+ -> Seq Scan on _abc_2
+ -> Seq Scan on _abc_3
+ -> Seq Scan on _abc_4
+ -> Seq Scan on _abc_5
+ -> Seq Scan on _abc_6
+ -> Seq Scan on _abc_7
+ -> Seq Scan on _abc_8
+ -> Seq Scan on _abc_9
+(11 rows)
+
+explain (costs off) select * from views.abc where id = 1;
+ QUERY PLAN
+--------------------
+ Seq Scan on _abc_0
+ Filter: (id = 1)
+(2 rows)
+
+explain (costs off) select * from views.abc where id = 1 for update;
+ QUERY PLAN
+--------------------------
+ LockRows
+ -> Seq Scan on _abc_0
+ Filter: (id = 1)
+(3 rows)
+
+select * from views.abc where id = 1 for update;
+ id
+----
+ 1
+(1 row)
+
+select count (*) from views.abc;
+ count
+-------
+ 100
+(1 row)
+
+/* Test INSERT */
+explain (costs off) insert into views.abc values (1);
+ QUERY PLAN
+---------------
+ Insert on abc
+ -> Result
+(2 rows)
+
+insert into views.abc values (1);
+ERROR: INSERT
+/* Test UPDATE */
+explain (costs off) update views.abc set id = 2 where id = 1 or id = 2;
+ QUERY PLAN
+--------------------------------------
+ Update on abc
+ -> Result
+ -> Append
+ -> Seq Scan on _abc_0
+ Filter: (id = 1)
+ -> Seq Scan on _abc_6
+ Filter: (id = 2)
+(7 rows)
+
+update views.abc set id = 2 where id = 1 or id = 2;
+ERROR: UPDATE
+/* Test DELETE */
+explain (costs off) delete from views.abc where id = 1 or id = 2;
+ QUERY PLAN
+--------------------------------------
+ Delete on abc
+ -> Result
+ -> Append
+ -> Seq Scan on _abc_0
+ Filter: (id = 1)
+ -> Seq Scan on _abc_6
+ Filter: (id = 2)
+(7 rows)
+
+delete from views.abc where id = 1 or id = 2;
+ERROR: DELETE
+/* Test SELECT with UNION */
+create view views.abc_union as table 
views._abc union table views._abc_add;
+create view views.abc_union_all as table views._abc union all table views._abc_add;
+explain (costs off) table views.abc_union;
+ QUERY PLAN
+--------------------------------------
+ HashAggregate
+ Group Key: _abc_0.id
+ -> Append
+ -> Append
+ -> Seq Scan on _abc_0
+ -> Seq Scan on _abc_1
+ -> Seq Scan on _abc_2
+ -> Seq Scan on _abc_3
+ -> Seq Scan on _abc_4
+ -> Seq Scan on _abc_5
+ -> Seq Scan on _abc_6
+ -> Seq Scan on _abc_7
+ -> Seq Scan on _abc_8
+ -> Seq Scan on _abc_9
+ -> Seq Scan on _abc_add
+(15 rows)
+
+explain (costs off) select * from views.abc_union where id = 5;
+ QUERY PLAN
+----------------------------------------
+ Unique
+ -> Sort
+ Sort Key: _abc_8.id
+ -> Append
+ -> Seq Scan on _abc_8
+ Filter: (id = 5)
+ -> Seq Scan on _abc_add
+ Filter: (id = 5)
+(8 rows)
+
+explain (costs off) table views.abc_union_all;
+ QUERY PLAN
+----------------------------
+ Append
+ -> Seq Scan on _abc_0
+ -> Seq Scan on _abc_1
+ -> Seq Scan on _abc_2
+ -> Seq Scan on _abc_3
+ -> Seq Scan on _abc_4
+ -> Seq Scan on _abc_5
+ -> Seq Scan on _abc_6
+ -> Seq Scan on _abc_7
+ -> Seq Scan on _abc_8
+ -> Seq Scan on _abc_9
+ -> Seq Scan on _abc_add
+(12 rows)
+
+explain (costs off) select * from views.abc_union_all where id = 5;
+ QUERY PLAN
+----------------------------
+ Append
+ -> Seq Scan on _abc_8
+ Filter: (id = 5)
+ -> Seq Scan on _abc_add
+ Filter: (id = 5)
+(5 rows)
+
+DROP SCHEMA views CASCADE;
+NOTICE: drop cascades to 16 other objects
+DROP EXTENSION pg_pathman;
diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff
new file mode 100644
index 00000000..e3e7c549
--- /dev/null
+++ b/patches/REL_14_STABLE-pg_pathman-core.diff
@@ -0,0 +1,533 @@
+diff --git a/contrib/Makefile b/contrib/Makefile
+index f27e458482..ea47c341c1 100644
+--- a/contrib/Makefile
++++ b/contrib/Makefile
+@@ -32,6 +32,7 @@ SUBDIRS = \
+ passwordcheck \
+ pg_buffercache \
+ pg_freespacemap \
++ pg_pathman \
+ pg_prewarm \
+ pg_stat_statements \
+ pg_surgery \
+diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
+index ca6f6d57d3..8ab313b910 100644
+--- a/src/backend/access/transam/xact.c
++++ b/src/backend/access/transam/xact.c
+@@ -76,7 +76,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED;
+ int XactIsoLevel;
+
+ bool DefaultXactReadOnly = false;
+-bool XactReadOnly;
++bool XactReadOnly = false;
+
+ bool DefaultXactDeferrable = false;
+ bool XactDeferrable;
+diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
+index 5483dee650..e2864e6ae9 100644
+--- a/src/backend/executor/execExprInterp.c
++++ b/src/backend/executor/execExprInterp.c
+@@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
+ }
+
+ out:
++
++ /*
++ * pg_pathman: pass 'tts_tableOid' to the result tuple to determine from
++ * which partition the tuple was read
++ */
++ if (resultslot)
++ {
++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid :
++ (innerslot ? innerslot->tts_tableOid : (outerslot ? 
outerslot->tts_tableOid : InvalidOid));
++ }
+ *isnull = state->resnull;
+ return state->resvalue;
+ }
+diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
+index b3ce4bae53..8f2bb12542 100644
+--- a/src/backend/executor/execMain.c
++++ b/src/backend/executor/execMain.c
+@@ -824,6 +824,13 @@ InitPlan(QueryDesc *queryDesc, int eflags)
+
+ estate->es_plannedstmt = plannedstmt;
+
++ /*
++ * Fields "es_result_relation_info", "es_original_tuple" are used for
++ * pg_pathman only:
++ */
++ estate->es_result_relation_info = NULL;
++ estate->es_original_tuple = NULL;
++
+ /*
+ * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
+ */
+@@ -2713,6 +2720,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
+ rcestate->es_junkFilter = parentestate->es_junkFilter;
+ rcestate->es_output_cid = parentestate->es_output_cid;
+
++ /*
++ * Fields "es_result_relation_info", "es_original_tuple" are used for
++ * pg_pathman only:
++ */
++ rcestate->es_result_relation_info = NULL;
++ rcestate->es_original_tuple = NULL;
++
+ /*
+ * ResultRelInfos needed by subplans are initialized from scratch when the
+ * subplans themselves are initialized.
+diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
+index d328856ae5..27235ec869 100644
+--- a/src/backend/executor/nodeModifyTable.c
++++ b/src/backend/executor/nodeModifyTable.c
+@@ -450,7 +450,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate,
+ * This is also a convenient place to verify that the output of an UPDATE
+ * matches the target table (ExecBuildUpdateProjection does that).
+ */
+-static void
++void
+ ExecInitUpdateProjection(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo)
+ {
+@@ -2363,6 +2363,7 @@ ExecModifyTable(PlanState *pstate)
+ PartitionTupleRouting *proute = node->mt_partition_tuple_routing;
+ List *relinfos = NIL;
+ ListCell *lc;
++ ResultRelInfo *saved_resultRelInfo;
+
+ CHECK_FOR_INTERRUPTS();
+
+@@ -2400,12 +2401,23 @@ ExecModifyTable(PlanState *pstate)
+ resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
+ subplanstate = outerPlanState(node);
+
++ saved_resultRelInfo = estate->es_result_relation_info;
++ estate->es_result_relation_info = NULL;
++
+ /*
+ * Fetch rows from subplan, and execute the required table modification
+ * for each row.
+ */
+ for (;;)
+ {
++ /*
++ * "es_original_tuple" should contain the original modified tuple (new
++ * values of the changed columns plus row identity information such as
++ * CTID) in case the planSlot tuple is replaced with a new value by
++ * pg_pathman in the "ExecProcNode(subplanstate)" call.
++ */
++ estate->es_original_tuple = NULL;
++
+ /*
+ * Reset the per-output-tuple exprcontext. This is needed because
+ * triggers expect to use that context as workspace. It's a bit ugly
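[Editorial note: to make the contract described in the comment above concrete, here is a minimal sketch of how a router-style custom scan's exec method could honor it. tuple_needs_move() and route_to_new_partition() are hypothetical helpers, and this is not the actual PartitionRouter code; es_original_tuple is the EState field added by this core patch.]

#include "postgres.h"
#include "executor/executor.h"
#include "nodes/execnodes.h"
#include "nodes/extensible.h"

/* hypothetical helpers, assumed to exist elsewhere */
extern bool tuple_needs_move(TupleTableSlot *slot);
extern TupleTableSlot *route_to_new_partition(EState *estate, TupleTableSlot *slot);

static TupleTableSlot *
router_exec(CustomScanState *node)
{
    EState         *estate = node->ss.ps.state;
    PlanState      *child = (PlanState *) linitial(node->custom_ps);
    TupleTableSlot *slot = ExecProcNode(child);

    if (TupIsNull(slot))
        return NULL;

    if (tuple_needs_move(slot))                      /* hypothetical check */
    {
        /* keep the junk-bearing original for ModifyTable to consume... */
        estate->es_original_tuple = slot;
        /* ...and hand back the re-routed replacement tuple instead */
        slot = route_to_new_partition(estate, slot); /* hypothetical */
    }
    return slot;
}

+@@ -2439,7 +2451,9 @@ ExecModifyTable(PlanState *pstate)
+ bool isNull;
+ Oid resultoid;
+
+- datum = ExecGetJunkAttribute(planSlot, node->mt_resultOidAttno,
++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 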
++ estate->es_original_tuple : planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + elog(ERROR, "tableoid is NULL"); +@@ -2458,6 +2472,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -2467,6 +2483,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -2496,7 +2513,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2526,7 +2544,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2557,8 +2576,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); +- slot = ExecInsert(node, resultRelInfo, slot, planSlot, ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); ++ slot = ExecInsert(node, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ slot, planSlot, + estate, node->canSetTag); + break; + case CMD_UPDATE: +@@ -2566,37 +2589,45 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + +- /* +- * Make the new tuple by combining plan's output tuple with +- * the old tuple being updated. +- */ +- oldSlot = resultRelInfo->ri_oldTupleSlot; +- if (oldtuple != NULL) +- { +- /* Use the wholerow junk attr as the old tuple. */ +- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); +- } +- else ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) + { +- /* Fetch the most recent version of old tuple. */ +- Relation relation = resultRelInfo->ri_RelationDesc; +- +- Assert(tupleid != NULL); +- if (!table_tuple_fetch_row_version(relation, tupleid, +- SnapshotAny, +- oldSlot)) +- elog(ERROR, "failed to fetch tuple being updated"); ++ /* ++ * Make the new tuple by combining plan's output tuple ++ * with the old tuple being updated. ++ */ ++ oldSlot = resultRelInfo->ri_oldTupleSlot; ++ if (oldtuple != NULL) ++ { ++ /* Use the wholerow junk attr as the old tuple. */ ++ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); ++ } ++ else ++ { ++ /* Fetch the most recent version of old tuple. 
*/
++ Relation relation = resultRelInfo->ri_RelationDesc;
++
++ Assert(tupleid != NULL);
++ if (!table_tuple_fetch_row_version(relation, tupleid,
++ SnapshotAny,
++ oldSlot))
++ elog(ERROR, "failed to fetch tuple being updated");
++ }
++ slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot,
++ oldSlot);
+ }
+- slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot,
+- oldSlot);
+
+ /* Now apply the update. */
+- slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot,
++ slot = ExecUpdate(node, estate->es_result_relation_info ?
++ estate->es_result_relation_info : resultRelInfo,
++ tupleid, oldtuple, slot,
+ planSlot, &node->mt_epqstate, estate,
+ node->canSetTag);
+ break;
+ case CMD_DELETE:
+- slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple,
++ slot = ExecDelete(node, estate->es_result_relation_info ?
++ estate->es_result_relation_info : resultRelInfo,
++ tupleid, oldtuple,
+ planSlot, &node->mt_epqstate, estate,
+ true, /* processReturning */
+ node->canSetTag,
+@@ -2613,7 +2644,10 @@ ExecModifyTable(PlanState *pstate)
+ * the work on next call.
+ */
+ if (slot)
++ {
++ estate->es_result_relation_info = saved_resultRelInfo;
+ return slot;
++ }
+ }
+
+ /*
+@@ -2642,6 +2676,7 @@ ExecModifyTable(PlanState *pstate)
+
+ node->mt_done = true;
+
++ estate->es_result_relation_info = saved_resultRelInfo;
+ return NULL;
+ }
+
+@@ -2716,6 +2751,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
+ ListCell *l;
+ int i;
+ Relation rel;
++ ResultRelInfo *saved_resultRelInfo;
+
+ /* check for unsupported flags */
+ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
+@@ -2812,6 +2848,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
+ i++;
+ }
+
++ /*
++ * pg_pathman: set "estate->es_result_relation_info" so that
++ * partition_filter_begin() and partition_router_begin() can pick it up
++ */
++ saved_resultRelInfo = estate->es_result_relation_info;
++ estate->es_result_relation_info = mtstate->resultRelInfo;
++
+ /*
+ * Now we may initialize the subplan.
+ */
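[Editorial note: to illustrate the hand-off described in the comment above, here is a minimal sketch of how a custom node's begin callback could pick up the published field. MyFilterState and my_filter_begin() are hypothetical names, not the actual pg_pathman implementation.]

#include "postgres.h"
#include "executor/executor.h"
#include "nodes/execnodes.h"
#include "nodes/extensible.h"

typedef struct MyFilterState
{
    CustomScanState css;
    Plan           *subplan;     /* child plan to initialize */
    ResultRelInfo  *parent_rri;  /* picked up from the EState */
} MyFilterState;

static void
my_filter_begin(CustomScanState *node, EState *estate, int eflags)
{
    MyFilterState *state = (MyFilterState *) node;

    /*
     * ExecInitModifyTable() (patched above) points this field at
     * mtstate->resultRelInfo while the subplan tree is initialized.
     */
    state->parent_rri = estate->es_result_relation_info;

    node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags));
}

+@@ -2884,6 +2927,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
+ }
+ }
+
++ estate->es_result_relation_info = saved_resultRelInfo;
++
+ /*
+ * If this is an inherited update/delete, there will be a junk attribute
+ * named "tableoid" present in the subplan's targetlist. 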
It will be used +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 381d9e548d..9d101c3a86 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion)0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 134f6862da..92ff475332 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,7 +53,9 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern bool DefaultXactReadOnly; +-extern bool XactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY ++extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ + extern bool xact_is_sampled; +diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h +index 2b4e104bb9..80d1274efe 100644 +--- a/src/include/catalog/objectaddress.h ++++ b/src/include/catalog/objectaddress.h +@@ -28,7 +28,7 @@ typedef struct ObjectAddress + int32 objectSubId; /* Subitem within object (eg column), or 0 */ + } ObjectAddress; + +-extern const ObjectAddress InvalidObjectAddress; ++extern PGDLLIMPORT const ObjectAddress InvalidObjectAddress; + + #define ObjectAddressSubSet(addr, class_id, object_id, object_sub_id) \ + do { \ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index 3dc03c913e..1002d97499 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -657,5 +657,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++extern void ExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h +index 02015efe13..2091f7f3b7 100644 +--- a/src/include/libpq/libpq-be.h ++++ b/src/include/libpq/libpq-be.h +@@ -327,7 +327,7 @@ extern ssize_t be_gssapi_read(Port *port, void *ptr, size_t len); + extern ssize_t be_gssapi_write(Port *port, void *ptr, size_t len); + #endif /* ENABLE_GSS */ + +-extern ProtocolVersion FrontendProtocol; ++extern PGDLLIMPORT ProtocolVersion FrontendProtocol; + + /* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. 
*/
+
+diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
+index 105180764e..2a40d2ce15 100644
+--- a/src/include/nodes/execnodes.h
++++ b/src/include/nodes/execnodes.h
+@@ -579,6 +579,12 @@ typedef struct EState
+ * es_result_relations in no
+ * specific order */
+
++ /* These fields were added for pg_pathman compatibility with 14: */
++ ResultRelInfo *es_result_relation_info; /* currently active array elt */
++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values
++ * of the changed columns plus row
++ * identity information such as CTID) */
++
+ PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */
+
+ /*
+diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
+index de22c9ba2c..c8be5323b8 100644
+--- a/src/tools/msvc/Install.pm
++++ b/src/tools/msvc/Install.pm
+@@ -30,6 +30,18 @@ my @client_program_files = (
+ 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql',
+ 'reindexdb', 'vacuumdb', @client_contribs);
+
++sub SubstituteMakefileVariables {
++ local $_ = shift; # Line to substitute
++ my $mf = shift; # Makefile text
++ while (/\$\((\w+)\)/) {
++ my $varname = $1;
++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) {
++ my $varvalue=$1;
++ s/\$\($varname\)/$varvalue/g;
++ }
++ }
++ return $_;
++}
+ sub lcopy
+ {
+ my $src = shift;
+@@ -608,7 +620,7 @@ sub ParseAndCleanRule
+ substr($flist, 0, index($flist, '$(addsuffix '))
+ . substr($flist, $i + 1);
+ }
+- return $flist;
++ return SubstituteMakefileVariables($flist, $mf);
+ }
+
+ sub CopyIncludeFiles
+diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
+index 05ff67e693..d169271df1 100644
+--- a/src/tools/msvc/Mkvcbuild.pm
++++ b/src/tools/msvc/Mkvcbuild.pm
+@@ -41,7 +41,10 @@ my @contrib_uselibpq =
+ my @contrib_uselibpgport = ('libpq_pipeline', 'oid2name', 'vacuumlo');
+ my @contrib_uselibpgcommon = ('libpq_pipeline', 'oid2name', 'vacuumlo');
+ my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] };
+-my $contrib_extraincludes = { 'dblink' => ['src/backend'] };
++my $contrib_extraincludes = {
++ 'dblink' => ['src/backend'],
++ 'pg_pathman' => ['contrib/pg_pathman/src/include']
++};
+ my $contrib_extrasource = {
+ 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ],
+ 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ],
+@@ -970,6 +973,7 @@ sub AddContrib
+ my $dn = $1;
+ my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n");
+ $proj->AddReference($postgres);
++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman';
+ AdjustContribProj($proj);
+ }
+ elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg)
+@@ -999,6 +1003,19 @@ sub AddContrib
+ return;
+ }
+
++sub SubstituteMakefileVariables {
++ local $_ = shift; # Line to substitute
++ my $mf = shift; # Makefile text
++ while (/\$\((\w+)\)/) {
++ my $varname = $1;
++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) {
++ my $varvalue=$1;
++ s/\$\($varname\)/$varvalue/g;
++ }
++ }
++ return $_;
++}
++
+ sub GenerateContribSqlFiles
+ {
+ my $n = shift;
+@@ -1023,23 +1040,53 @@ sub GenerateContribSqlFiles
+ substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1);
+ }
+
++ $l = SubstituteMakefileVariables($l,$mf);
+ foreach my $d (split /\s+/, $l)
+ {
+- my $in = "$d.in";
+- my $out = "$d";
+-
+- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in"))
+- {
+- print "Building $out from $in (contrib/$n)...\n";
+- my $cont = Project::read_file("contrib/$n/$in");
+- my $dn = $out;
+- $dn =~ s/\.sql$//;
+- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
+- my $o;
+- open($o, '>', "contrib/$n/$out")
+- || croak "Could not write to contrib/$n/$d";
+- print $o $cont;
+- close($o);
++ if ( -f "contrib/$n/$d.in" ) {
++ my $in = "$d.in";
++ my $out = "$d";
++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in"))
++ {
++ print "Building $out from $in (contrib/$n)...\n";
++ my $cont = Project::read_file("contrib/$n/$in");
++ my $dn = $out;
++ $dn =~ s/\.sql$//;
++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
++ my $o;
++ open($o, '>', "contrib/$n/$out")
++ || croak "Could not write to contrib/$n/$d";
++ print $o $cont;
++ close($o);
++ }
++ } else {
++ # Search for makefile rule.
++ # For now we do not process rule command and assume
++ # that we should just concatenate all prerequisites
++ #
++ my @prereq = ();
++ my $target;
++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg;
++ RULE:
++ while (@rules) {
++ $target = SubstituteMakefileVariables(shift @rules,$mf);
++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf));
++ last RULE if ($target eq $d);
++ @prereq = ();
++ }
++ croak "Don't know how to build contrib/$n/$d" unless @prereq;
++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"),
++ @prereq)) {
++ print STDERR "building $d from @prereq by concatenation\n";
++ my $o;
++ open $o, ">contrib/$n/$d"
++ or croak("Couldn't write to contrib/$n/$d:$!");
++ for my $in (@prereq) {
++ my $data = Project::read_file("contrib/$n/$in");
++ print $o $data;
++ }
++ close $o;
++ }
+ }
+ }
+ }
diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql
index 5aaea49a..7a4dc606 100644
--- a/sql/pathman_subpartitions.sql
+++ b/sql/pathman_subpartitions.sql
@@ -142,7 +142,8 @@ INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1);
SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val;
SET pg_pathman.enable_partitionrouter = ON;
-UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *;
+WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *)
+SELECT * FROM updated ORDER BY val ASC;
DROP TABLE subpartitions.abc CASCADE;
diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c
index abf71f9d..7afdd99a 100644
--- a/src/compat/pg_compat.c
+++ b/src/compat/pg_compat.c
@@ -181,7 +181,9 @@ make_restrictinfos_from_actual_clauses(PlannerInfo *root,
root->hasPseudoConstantQuals = true;
}
- rinfo = make_restrictinfo(clause,
+ rinfo = make_restrictinfo_compat(
+ root,
+ clause,
true,
false,
pseudoconstant,
@@ -235,7 +237,9 @@ McxtStatsInternal(MemoryContext context, int level,
AssertArg(MemoryContextIsValid(context));
/* Examine the context itself */
-#if PG_VERSION_NUM >= 110000
+#if PG_VERSION_NUM >= 140000
+ (*context->methods->stats) (context, NULL, NULL, totals, true);
+#elif PG_VERSION_NUM >= 110000
(*context->methods->stats) (context, NULL, NULL, totals);
#else
(*context->methods->stats) (context, level, false, totals);
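[Editorial note: the make_restrictinfo_compat() wrapper used in the hunk above is defined in pg_compat.h later in this patch. For illustration, a minimal sketch of the pattern with simplified argument values; make_simple_rinfo() is a hypothetical helper, not the actual pg_pathman call site.]

#include "postgres.h"
#include "optimizer/restrictinfo.h"
#include "compat/pg_compat.h"

/*
 * Sketch: one call site works on any supported PG version; the _compat
 * macro drops the PlannerInfo argument when building against pre-14 servers.
 */
static RestrictInfo *
make_simple_rinfo(PlannerInfo *root, Expr *clause)
{
    return make_restrictinfo_compat(root,
                                    clause,
                                    true,   /* is_pushed_down */
                                    false,  /* outerjoin_delayed */
                                    false,  /* pseudoconstant */
                                    0,      /* security_level */
                                    NULL,   /* required_relids */
                                    NULL,   /* outer_relids */
                                    NULL);  /* nullable_relids */
}

diff --git a/src/hooks.c b/src/hooks.c
index e9ff1ed7..276f6cfd 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -751,12 +751,25 @@ pathman_planner_hook(Query *parse, int cursorOptions, 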
ParamListInfo boundParams)
* Post parse analysis hook. It makes sure the config is loaded before executing
* any statement, including utility commands.
*/
+#if PG_VERSION_NUM >= 140000
+/*
+ * pathman_post_parse_analyze_hook(), pathman_post_parse_analyze_hook_next():
+ * in 14 a new argument was added (5fd9dfa5f50)
+ */
+void
+pathman_post_parse_analyze_hook(ParseState *pstate, Query *query, JumbleState *jstate)
+{
+ /* Invoke original hook if needed */
+ if (pathman_post_parse_analyze_hook_next)
+ pathman_post_parse_analyze_hook_next(pstate, query, jstate);
+#else
void
pathman_post_parse_analyze_hook(ParseState *pstate, Query *query)
{
/* Invoke original hook if needed */
if (pathman_post_parse_analyze_hook_next)
pathman_post_parse_analyze_hook_next(pstate, query);
+#endif
/* See cook_partitioning_expression() */
if (!pathman_hooks_enabled)
@@ -944,7 +957,23 @@ pathman_relcache_hook(Datum arg, Oid relid)
* In PG 13 (2f9661311b8) command completion tags was reworked (added QueryCompletion struct)
*/
void
-#if PG_VERSION_NUM >= 130000
+#if PG_VERSION_NUM >= 140000
+/*
+ * pathman_process_utility_hook(), pathman_process_utility_hook_next():
+ * in 14 a new argument (readOnlyTree) was added
+ */
+pathman_process_utility_hook(PlannedStmt *first_arg,
+ const char *queryString,
+ bool readOnlyTree,
+ ProcessUtilityContext context,
+ ParamListInfo params,
+ QueryEnvironment *queryEnv,
+ DestReceiver *dest, QueryCompletion *queryCompletion)
+{
+ Node *parsetree = first_arg->utilityStmt;
+ int stmt_location = first_arg->stmt_location,
+ stmt_len = first_arg->stmt_len;
+#elif PG_VERSION_NUM >= 130000
pathman_process_utility_hook(PlannedStmt *first_arg,
const char *queryString,
ProcessUtilityContext context,
ParamListInfo params,
QueryEnvironment *queryEnv,
DestReceiver *dest, QueryCompletion *queryCompletion)
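[Editorial note: hook registration itself needs no version guard, because the hook type's signature changes together with the server headers; only the hook definitions above need the #if PG_VERSION_NUM branches. A simplified sketch of the usual _PG_init() pattern follows; the real pg_pathman _PG_init() registers more hooks than shown.]

#include "postgres.h"
#include "parser/analyze.h"   /* post_parse_analyze_hook */
#include "tcop/utility.h"     /* ProcessUtility_hook */

void
_PG_init(void)
{
    /* chain onto whatever hooks were installed before us */
    pathman_post_parse_analyze_hook_next = post_parse_analyze_hook;
    post_parse_analyze_hook = pathman_post_parse_analyze_hook;

    pathman_process_utility_hook_next = ProcessUtility_hook;
    ProcessUtility_hook = pathman_process_utility_hook;
}

@@ -1068,7 +1097,15 @@ pathman_process_utility_hook(Node *first_arg,
}
/* Finally call process_utility_hook_next or standard_ProcessUtility */
-#if PG_VERSION_NUM >= 130000
+#if PG_VERSION_NUM >= 140000
+ call_process_utility_compat((pathman_process_utility_hook_next ?
+ pathman_process_utility_hook_next :
+ standard_ProcessUtility),
+ first_arg, queryString,
+ readOnlyTree,
+ context, params, queryEnv,
+ dest, queryCompletion);
+#elif PG_VERSION_NUM >= 130000
+ call_process_utility_compat((pathman_process_utility_hook_next ? 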
pathman_process_utility_hook_next : standard_ProcessUtility), diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 24a36fea..a551b7ed 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -130,7 +130,12 @@ /* * BeginCopyFrom() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((pstate), (rel), NULL, (filename), (is_program), \ + (data_source_cb), (attnamelist), (options)) +#elif PG_VERSION_NUM >= 100000 #define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ attnamelist, options) \ BeginCopyFrom((pstate), (rel), (filename), (is_program), \ @@ -174,7 +179,14 @@ * - in pg 10 PlannedStmt object * - in pg 9.6 and lower Node parsetree */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + readOnlyTree, context, params, query_env, \ + dest, completion_tag) \ + (process_utility)((first_arg), (query_string), readOnlyTree, \ + (context), (params), \ + (query_env), (dest), (completion_tag)) +#elif PG_VERSION_NUM >= 100000 #define call_process_utility_compat(process_utility, first_arg, query_string, \ context, params, query_env, dest, \ completion_tag) \ @@ -240,7 +252,11 @@ /* * create_append_path() */ -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, -1) +#elif PG_VERSION_NUM >= 130000 /* * PGPRO-3938 made create_append_path compatible with vanilla again */ @@ -303,7 +319,12 @@ /* * create_merge_append_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer)) +#elif PG_VERSION_NUM >= 100000 #define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ required_outer) \ create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ @@ -650,7 +671,20 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 set NULL into 'queryEnv' argument */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + do { \ + PlannedStmt *stmt = makeNode(PlannedStmt); \ + stmt->commandType = CMD_UTILITY; \ + stmt->canSetTag = true; \ + stmt->utilityStmt = (parsetree); \ + stmt->stmt_location = -1; \ + stmt->stmt_len = 0; \ + ProcessUtility(stmt, (queryString), false, (context), (params), NULL, \ + (dest), (completionTag)); \ + } while (0) +#elif PG_VERSION_NUM >= 100000 #define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ completionTag) \ do { \ @@ -709,6 +743,9 @@ extern void set_rel_consider_parallel(PlannerInfo *root, * in compat version the type of first argument is (Expr *) */ #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 /* function removed in 375398244168add84a884347625d14581a421e71 */ +extern TargetEntry *tlist_member_ignore_relabel(Expr * node, List * targetlist); +#endif #define tlist_member_ignore_relabel_compat(expr, targetlist) \ tlist_member_ignore_relabel((expr), (targetlist)) #elif PG_VERSION_NUM >= 90500 @@ -961,12 +998,16 @@ extern AttrNumber 
*convert_tuples_by_name_map(TupleDesc indesc, /* * ExecInsertIndexTuples. Since 12 slot contains tupleid. + * Since 14: new fields "resultRelInfo", "update". */ -#if PG_VERSION_NUM >= 120000 -#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ +#if PG_VERSION_NUM >= 140000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes)) +#elif PG_VERSION_NUM >= 120000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) #else -#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) #endif @@ -1006,7 +1047,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * macro (and never will be, for old versions), so distinguish via macro added * by the commit. */ -#ifdef QTW_DONT_COPY_DEFAULT +#if defined(QTW_DONT_COPY_DEFAULT) && (PG_VERSION_NUM < 140000) #define expression_tree_mutator_compat(node, mutator, context) \ expression_tree_mutator((node), (mutator), (context), 0) #else @@ -1101,4 +1142,34 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o), (m)) #endif +/* + * raw_parser() + * In 14 new argument was added (844fe9f159a) + */ +#if PG_VERSION_NUM >= 140000 +#define raw_parser_compat(s) raw_parser((s), RAW_PARSE_DEFAULT) +#else +#define raw_parser_compat(s) raw_parser(s) +#endif + +/* + * make_restrictinfo() + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 140000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#else +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#endif + +/* + * pull_varnos() + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 140000 +#define pull_varnos_compat(r, n) pull_varnos((r), (n)) +#else +#define pull_varnos_compat(r, n) pull_varnos(n) +#endif + #endif /* PG_COMPAT_H */ diff --git a/src/include/hooks.h b/src/include/hooks.h index 49d7e8f1..ccfe060b 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -51,14 +51,29 @@ PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams); +#if PG_VERSION_NUM >= 140000 +void pathman_post_parse_analyze_hook(ParseState *pstate, + Query *query, + JumbleState *jstate); +#else void pathman_post_parse_analyze_hook(ParseState *pstate, Query *query); +#endif void pathman_shmem_startup_hook(void); void pathman_relcache_hook(Datum arg, Oid relid); -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 130000 void 
pathman_process_utility_hook(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 8240d13b..c6924609 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -32,7 +32,9 @@ typedef struct PartitionRouterState Plan *subplan; /* proxy variable to store subplan */ ExprState *constraint; /* should tuple remain in partition? */ +#if PG_VERSION_NUM < 140000 /* field removed in 86dc90056dfd */ JunkFilter *junkfilter; /* 'ctid' extraction facility */ +#endif ResultRelInfo *current_rri; /* Machinery required for EvalPlanQual */ @@ -42,6 +44,9 @@ typedef struct PartitionRouterState /* Preserved slot from last call */ bool yielded; TupleTableSlot *yielded_slot; +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *yielded_original_slot; +#endif /* Need these for a GREAT deal of hackery */ ModifyTableState *mt_state; @@ -66,8 +71,6 @@ extern CustomExecMethods partition_router_exec_methods; #define MTHackField(mt_state, field) ( (mt_state)->field ) void init_partition_router_static_data(void); -void prepare_modify_table_for_partition_router(PlanState *state, - void *context); void partition_router_begin(CustomScanState *node, EState *estate, int eflags); void partition_router_end(CustomScanState *node); void partition_router_rescan(CustomScanState *node); diff --git a/src/nodes_common.c b/src/nodes_common.c index c2a02649..b6bf24cb 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -184,6 +184,42 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) return temp_tlist; } +#if PG_VERSION_NUM >= 140000 +/* + * Function "tlist_member_ignore_relabel" was removed in vanilla (375398244168) + * Function moved to pg_pathman. + */ +/* + * tlist_member_ignore_relabel + * Finds the (first) member of the given tlist whose expression is + * equal() to the given expression. Result is NULL if no such member. + * We ignore top-level RelabelType nodes + * while checking for a match. This is needed for some scenarios + * involving binary-compatible sort operations. + */ +TargetEntry * +tlist_member_ignore_relabel(Expr *node, List *targetlist) +{ + ListCell *temp; + + while (node && IsA(node, RelabelType)) + node = ((RelabelType *) node)->arg; + + foreach(temp, targetlist) + { + TargetEntry *tlentry = (TargetEntry *) lfirst(temp); + Expr *tlexpr = tlentry->expr; + + while (tlexpr && IsA(tlexpr, RelabelType)) + tlexpr = ((RelabelType *) tlexpr)->arg; + + if (equal(node, tlexpr)) + return tlentry; + } + return NULL; +} +#endif + /* Is tlist 'a' subset of tlist 'b'? 
(in terms of Vars) */ static bool tlist_is_var_subset(List *a, List *b) diff --git a/src/partition_filter.c b/src/partition_filter.c index b8b3b03c..5d1f4943 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -14,6 +14,7 @@ #include "pathman.h" #include "partition_creation.h" #include "partition_filter.h" +#include "partition_router.h" #include "utils.h" #include "access/htup_details.h" @@ -353,10 +354,13 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) CopyToResultRelInfo(ri_onConflictSetWhere); #endif +#if PG_VERSION_NUM < 140000 + /* field "ri_junkFilter" removed in 86dc90056dfd */ if (parts_storage->command_type != CMD_UPDATE) CopyToResultRelInfo(ri_junkFilter); else child_result_rel_info->ri_junkFilter = NULL; +#endif /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; @@ -765,6 +769,32 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) RPS_RRI_CB(NULL, NULL)); } +#if PG_VERSION_NUM >= 140000 +/* + * Re-initialization of PartitionFilterState for using new partition with new + * "current_rri" + */ +static void +reint_partition_filter_state(PartitionFilterState *state, ResultRelInfo *current_rri) +{ + Oid parent_relid = state->partitioned_table; + EState *estate = state->result_parts.estate; + + fini_result_parts_storage(&state->result_parts); + + state->returning_list = current_rri->ri_returningList; + + /* Init ResultRelInfo cache */ + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, + estate, state->command_type, + RPS_SKIP_RELATIONS, + state->on_conflict_action != ONCONFLICT_NONE, + RPS_RRI_CB(prepare_rri_for_insert, state), + RPS_RRI_CB(NULL, NULL)); +} +#endif + TupleTableSlot * partition_filter_exec(CustomScanState *node) { @@ -782,6 +812,22 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; ResultRelInfo *rri; +#if PG_VERSION_NUM >= 140000 + PartitionRouterState *pr_state = linitial(node->custom_ps); + + /* + * For 14: in case UPDATE command, we can scanning several partitions + * in one plan. Need to switch context each time partition is switched. 
+ */ + if (IsPartitionRouterState(pr_state) && + state->result_parts.base_rri != pr_state->current_rri) + { /* + * Slot switched to new partition: need to + * reinitialize some PartitionFilterState variables + */ + reint_partition_filter_state(state, pr_state->current_rri); + } +#endif /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -1112,9 +1158,18 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, NodeSetTag(&mtstate, T_ModifyTableState); mtstate.ps.state = estate; mtstate.operation = CMD_INSERT; +#if PG_VERSION_NUM >= 140000 + /* + * Some fields ("mt_plans", "mt_nplans", "mt_whichplan") removed + * in 86dc90056dfd + */ + outerPlanState(&mtstate.ps) = pstate_ptr; + mtstate.mt_nrels = 1; +#else mtstate.mt_plans = &pstate_ptr; mtstate.mt_nplans = 1; mtstate.mt_whichplan = 0; +#endif mtstate.resultRelInfo = rri; #if PG_VERSION_NUM < 110000 mtstate.mt_onconflict = ONCONFLICT_NONE; @@ -1255,9 +1310,40 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) static int append_rri_to_estate(EState *estate, ResultRelInfo *rri) { - estate_mod_data *emd_struct = fetch_estate_mod_data(estate); - int result_rels_allocated = emd_struct->estate_alloc_result_rels; + estate_mod_data *emd_struct = fetch_estate_mod_data(estate); + int result_rels_allocated = emd_struct->estate_alloc_result_rels; +#if PG_VERSION_NUM >= 140000 /* reworked in commit a04daa97a433 */ + ResultRelInfo **rri_array = estate->es_result_relations; + + /* + * We already increased variable "estate->es_range_table_size" in previous + * call append_rte_to_estate(): see + * "estate->es_range_table_size = list_length(estate->es_range_table)" + * after "lappend(estate->es_range_table, rte)". So we should append + * new value in "estate->es_result_relations" only. 
+ */ + /* Reallocate estate->es_result_relations if needed */ + if (result_rels_allocated < estate->es_range_table_size) + { + result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1; + estate->es_result_relations = palloc(result_rels_allocated * + sizeof(ResultRelInfo *)); + memcpy(estate->es_result_relations, + rri_array, + (estate->es_range_table_size - 1) * sizeof(ResultRelInfo *)); + } + + estate->es_result_relations[estate->es_range_table_size - 1] = rri; + + estate->es_opened_result_relations = lappend(estate->es_opened_result_relations, rri); + + /* Update estate_mod_data */ + emd_struct->estate_alloc_result_rels = result_rels_allocated; + emd_struct->estate_not_modified = false; + + return estate->es_range_table_size; +#else /* Reallocate estate->es_result_relations if needed */ if (result_rels_allocated <= estate->es_num_result_relations) { @@ -1284,6 +1370,7 @@ append_rri_to_estate(EState *estate, ResultRelInfo *rri) emd_struct->estate_not_modified = false; return estate->es_num_result_relations++; +#endif } @@ -1318,7 +1405,15 @@ fetch_estate_mod_data(EState *estate) /* Have to create a new one */ emd_struct = MemoryContextAlloc(estate_mcxt, sizeof(estate_mod_data)); emd_struct->estate_not_modified = true; +#if PG_VERSION_NUM >= 140000 + /* + * Reworked in commit a04daa97a433: field "es_num_result_relations" + * removed + */ + emd_struct->estate_alloc_result_rels = estate->es_range_table_size; +#else emd_struct->estate_alloc_result_rels = estate->es_num_result_relations; +#endif cb = MemoryContextAlloc(estate_mcxt, sizeof(MemoryContextCallback)); cb->func = pf_memcxt_callback; diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 41590425..ffa770ba 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -68,23 +68,32 @@ partition_overseer_create_scan_state(CustomScan *node) static void set_mt_state_for_router(PlanState *state, void *context) { +#if PG_VERSION_NUM < 140000 int i; - ModifyTableState *mt_state = (ModifyTableState *) state; +#endif + ModifyTableState *mt_state = (ModifyTableState *) state; - if (!IsA(state, ModifyTableState)) + if (!IsA(state, ModifyTableState)) return; +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_plans", "mt_nplans" removed in 86dc90056dfd */ + { + CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state); +#else for (i = 0; i < mt_state->mt_nplans; i++) { - CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; - PartitionRouterState *pr_state; - + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; +#endif /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pf_state) && - IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) + if (IsPartitionFilterState(pf_state)) { - /* HACK: point to ModifyTable in PartitionRouter */ - pr_state->mt_state = mt_state; + PartitionRouterState *pr_state = linitial(pf_state->custom_ps); + if (IsPartitionRouterState(pr_state)) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = mt_state; + } } } } @@ -116,25 +125,40 @@ partition_overseer_exec(CustomScanState *node) mt_plans_new; /* Get initial signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + mt_plans_old = mt_state->mt_nrels; +#else mt_plans_old = mt_state->mt_nplans; +#endif restart: /* Run ModifyTable */ slot = ExecProcNode((PlanState *) mt_state); /* Get current signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + 
mt_plans_new = MTHackField(mt_state, mt_nrels); +#else mt_plans_new = MTHackField(mt_state, mt_nplans); +#endif /* Did PartitionRouter ask us to restart? */ if (mt_plans_new != mt_plans_old) { /* Signal points to current plan */ +#if PG_VERSION_NUM < 140000 int state_idx = -mt_plans_new; +#endif /* HACK: partially restore ModifyTable's state */ MTHackField(mt_state, mt_done) = false; +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = mt_plans_old; +#else MTHackField(mt_state, mt_nplans) = mt_plans_old; MTHackField(mt_state, mt_whichplan) = state_idx; +#endif /* Rerun ModifyTable */ goto restart; diff --git a/src/partition_router.c b/src/partition_router.c index b602347b..17013a02 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -72,9 +72,10 @@ static TupleTableSlot *router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, CmdType operation); static TupleTableSlot *router_get_slot(PartitionRouterState *state, + EState *estate, bool *should_process); -static void router_lazy_init_constraint(PartitionRouterState *state); +static void router_lazy_init_constraint(PartitionRouterState *state, bool recreate); static ItemPointerData router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot); @@ -185,43 +186,97 @@ partition_router_exec(CustomScanState *node) take_next_tuple: /* Get next tuple for processing */ - slot = router_get_slot(state, &should_process); + slot = router_get_slot(state, estate, &should_process); if (should_process) { CmdType new_cmd; bool deleted; ItemPointerData ctid; + /* Variables for prepare a full "new" tuple, after 86dc90056dfd */ +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *old_slot; + ResultRelInfo *rri; +#endif + TupleTableSlot *full_slot = slot; + bool partition_changed = false; ItemPointerSetInvalid(&ctid); +#if PG_VERSION_NUM < 140000 /* Build new junkfilter if needed */ if (state->junkfilter == NULL) state->junkfilter = state->current_rri->ri_junkFilter; +#else + if (slot->tts_tableOid == InvalidOid) + elog(ERROR, "invalid table OID in returned tuple"); + + /* + * For 14: in case UPDATE command we can scanning several partitions + * in one plan. Need to switch context each time partition is switched. + */ + if (RelationGetRelid(state->current_rri->ri_RelationDesc) != slot->tts_tableOid) + { + /* + * Function router_get_slot() switched to new partition: need to + * reinitialize some PartitionRouterState variables + */ + state->current_rri = ExecLookupResultRelByOid(state->mt_state, + slot->tts_tableOid, false, false); + partition_changed = true; + } +#endif - /* Build recheck constraint state lazily */ - router_lazy_init_constraint(state); + /* Build recheck constraint state lazily (and re-create constraint + * in case we start scan another relation) */ + router_lazy_init_constraint(state, partition_changed); /* Extract item pointer from current tuple */ ctid = router_extract_ctid(state, slot); + Assert(ItemPointerIsValid(&ctid)); /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = state->current_rri; +#if PG_VERSION_NUM >= 140000 /* after 86dc90056dfd */ + /* Store original slot */ + estate->es_original_tuple = slot; + /* + * "slot" contains new values of the changed columns plus row + * identity information such as CTID. + * Need to prepare a "newSlot" with full tuple for triggers in + * router_lock_or_delete_tuple(). 
But we should return old slot + * with CTID because this CTID is used in ExecModifyTable(). + */ + rri = state->current_rri; + + /* Initialize projection info if first time for this table. */ + if (unlikely(!rri->ri_projectNewInfoValid)) + ExecInitUpdateProjection(state->mt_state, rri); + + old_slot = rri->ri_oldTupleSlot; + /* Fetch the most recent version of old tuple. */ + if (!table_tuple_fetch_row_version(rri->ri_RelationDesc, + &ctid, SnapshotAny, old_slot)) + elog(ERROR, "failed to fetch partition tuple being updated"); + + /* Build full tuple (using "old_slot" + changed from "slot"): */ + full_slot = ExecGetUpdateNewTuple(rri, slot, old_slot); +#endif + /* Lock or delete tuple from old partition */ - Assert(ItemPointerIsValid(&ctid)); - slot = router_lock_or_delete_tuple(state, slot, - &ctid, &deleted); + full_slot = router_lock_or_delete_tuple(state, full_slot, + &ctid, &deleted); /* We require a tuple (previous one has vanished) */ - if (TupIsNull(slot)) + if (TupIsNull(full_slot)) goto take_next_tuple; /* Should we use UPDATE or DELETE + INSERT? */ new_cmd = deleted ? CMD_INSERT : CMD_UPDATE; /* Alter ModifyTable's state and return */ - return router_set_slot(state, slot, new_cmd); + return router_set_slot(state, full_slot, new_cmd); } return slot; @@ -265,7 +320,12 @@ router_set_slot(PartitionRouterState *state, return slot; /* HACK: alter ModifyTable's state */ +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = -mt_state->mt_nrels; +#else MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; +#endif MTHackField(mt_state, operation) = operation; /* HACK: disable AFTER STATEMENT triggers */ @@ -273,6 +333,9 @@ router_set_slot(PartitionRouterState *state, if (!TupIsNull(slot)) { + EState *estate = mt_state->ps.state; + +#if PG_VERSION_NUM < 140000 /* field "ri_junkFilter" removed in 86dc90056dfd */ /* We should've cached junk filter already */ Assert(state->junkfilter); @@ -280,12 +343,20 @@ router_set_slot(PartitionRouterState *state, state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? state->junkfilter : NULL; +#endif /* Don't forget to set saved_slot! */ - state->yielded_slot = ExecInitExtraTupleSlotCompat(mt_state->ps.state, + state->yielded_slot = ExecInitExtraTupleSlotCompat(estate, slot->tts_tupleDescriptor, &TTSOpsHeapTuple); ExecCopySlot(state->yielded_slot, slot); +#if PG_VERSION_NUM >= 140000 + Assert(estate->es_original_tuple != NULL); + state->yielded_original_slot = ExecInitExtraTupleSlotCompat(estate, + estate->es_original_tuple->tts_tupleDescriptor, + &TTSOpsHeapTuple); + ExecCopySlot(state->yielded_original_slot, estate->es_original_tuple); +#endif } /* Yield */ @@ -296,6 +367,7 @@ router_set_slot(PartitionRouterState *state, /* Fetch next tuple (either fresh or yielded) */ static TupleTableSlot * router_get_slot(PartitionRouterState *state, + EState *estate, bool *should_process) { TupleTableSlot *slot; @@ -309,6 +381,10 @@ router_get_slot(PartitionRouterState *state, /* Reset saved slot */ slot = state->yielded_slot; state->yielded_slot = NULL; +#if PG_VERSION_NUM >= 140000 + estate->es_original_tuple = state->yielded_original_slot; + state->yielded_original_slot = NULL; +#endif state->yielded = false; /* We shouldn't process preserved slot... 
*/ @@ -331,9 +407,9 @@ router_get_slot(PartitionRouterState *state, } static void -router_lazy_init_constraint(PartitionRouterState *state) +router_lazy_init_constraint(PartitionRouterState *state, bool reinit) { - if (state->constraint == NULL) + if (state->constraint == NULL || reinit) { Relation rel = state->current_rri->ri_RelationDesc; Oid relid = RelationGetRelid(rel); @@ -376,7 +452,11 @@ router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot) bool ctid_isnull; ctid_datum = ExecGetJunkAttribute(slot, +#if PG_VERSION_NUM >= 140000 /* field "junkfilter" removed in 86dc90056dfd */ + state->current_rri->ri_RowIdAttNo, +#else state->junkfilter->jf_junkAttNo, +#endif &ctid_isnull); /* shouldn't ever get a null result... */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 77a55bd3..2477cc7f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -185,8 +185,12 @@ plan_tree_visitor(Plan *plan, break; case T_ModifyTable: +#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */ + plan_tree_visitor(outerPlan(plan), visitor, context); +#else foreach (l, ((ModifyTable *) plan)->plans) plan_tree_visitor((Plan *) lfirst(l), visitor, context); +#endif break; case T_Append: @@ -248,9 +252,13 @@ state_tree_visitor(PlanState *state, break; case T_ModifyTable: +#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */ + visitor(outerPlanState(state), context); +#else state_visit_members(((ModifyTableState *) state)->mt_plans, ((ModifyTableState *) state)->mt_nplans, visitor, context); +#endif break; case T_Append: @@ -757,9 +765,19 @@ partition_filter_visitor(Plan *plan, void *context) { List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else ListCell *lc1, *lc2, *lc3; +#endif /* Skip if not ModifyTable with 'INSERT' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) @@ -768,8 +786,12 @@ partition_filter_visitor(Plan *plan, void *context) Assert(rtable && IsA(rtable, List)); lc3 = list_head(modify_table->returningLists); +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) +#endif { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable); @@ -786,11 +808,19 @@ partition_filter_visitor(Plan *plan, void *context) lc3 = lnext_compat(modify_table->returningLists, lc3); } +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + outerPlan(modify_table) = make_partition_filter(subplan, relid, + modify_table->nominalRelation, + modify_table->onConflictAction, + modify_table->operation, + returning_list); +#else lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, modify_table->onConflictAction, modify_table->operation, returning_list); +#endif } } @@ -807,9 +837,19 @@ partition_router_visitor(Plan *plan, void *context) { List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else 
ListCell *lc1, *lc2, *lc3; +#endif bool changed = false; /* Skip if not ModifyTable with 'UPDATE' command */ @@ -827,8 +867,12 @@ partition_router_visitor(Plan *plan, void *context) } lc3 = list_head(modify_table->returningLists); +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) +#endif { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable), @@ -852,8 +896,13 @@ partition_router_visitor(Plan *plan, void *context) lc3 = lnext_compat(modify_table->returningLists, lc3); } +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + prouter = make_partition_router(subplan, + modify_table->epqParam); +#else prouter = make_partition_router((Plan *) lfirst(lc1), modify_table->epqParam); +#endif pfilter = make_partition_filter((Plan *) prouter, relid, modify_table->nominalRelation, @@ -861,7 +910,11 @@ partition_router_visitor(Plan *plan, void *context) CMD_UPDATE, returning_list); +#if PG_VERSION_NUM >= 140000 /* for changes in 86dc90056dfd */ + outerPlan(modify_table) = pfilter; +#else lfirst(lc1) = pfilter; +#endif changed = true; } } diff --git a/src/relation_info.c b/src/relation_info.c index df60dde3..64c04c2f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1444,7 +1444,7 @@ parse_partitioning_expression(const Oid relid, PG_TRY(); { - parsetree_list = raw_parser(query_string); + parsetree_list = raw_parser_compat(query_string); } PG_CATCH(); { @@ -1555,7 +1555,7 @@ cook_partitioning_expression(const Oid relid, " must be marked IMMUTABLE"))); /* Sanity check #5 */ - expr_varnos = pull_varnos(expr); + expr_varnos = pull_varnos_compat(NULL, expr); if (bms_num_members(expr_varnos) != 1 || relid != ((RangeTblEntry *) linitial(query->rtable))->relid) { diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 1949d970..89649e0d 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -67,7 +67,12 @@ ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; #define PATHMAN_COPY_WRITE_LOCK RowExclusiveLock -static uint64 PathmanCopyFrom(CopyState cstate, +static uint64 PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif Relation parent_rel, List *range_table, bool old_protocol); @@ -230,7 +235,11 @@ is_pathman_related_alter_column_type(Node *parsetree, return false; /* Are we going to modify some table? */ +#if PG_VERSION_NUM >= 140000 + if (alter_table_stmt->objtype != OBJECT_TABLE) +#else if (alter_table_stmt->relkind != OBJECT_TABLE) +#endif return false; /* Assume it's a parent, fetch its Oid */ @@ -284,7 +293,7 @@ is_pathman_related_alter_column_type(Node *parsetree, } /* - * CopyGetAttnums - build an integer list of attnums to be copied + * PathmanCopyGetAttnums - build an integer list of attnums to be copied * * The input attnamelist is either the user-specified column list, * or NIL if there was none (in which case we want all the non-dropped @@ -293,7 +302,7 @@ is_pathman_related_alter_column_type(Node *parsetree, * rel can be NULL ... it's only used for error reports. 
*/ static List * -CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) +PathmanCopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) { List *attnums = NIL; @@ -372,7 +381,11 @@ PathmanDoCopy(const CopyStmt *stmt, int stmt_len, uint64 *processed) { +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate; +#else CopyState cstate; +#endif ParseState *pstate; Relation rel; List *range_table = NIL; @@ -419,7 +432,7 @@ PathmanDoCopy(const CopyStmt *stmt, range_table = list_make1(rte); tupDesc = RelationGetDescr(rel); - attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist); + attnums = PathmanCopyGetAttnums(tupDesc, rel, stmt->attlist); foreach(cur, attnums) { int attnum = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; @@ -483,7 +496,13 @@ PathmanDoCopy(const CopyStmt *stmt, * Copy FROM file to relation. */ static uint64 -PathmanCopyFrom(CopyState cstate, Relation parent_rel, +PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif + Relation parent_rel, List *range_table, bool old_protocol) { HeapTuple tuple; @@ -510,6 +529,23 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, 0); ExecOpenIndices(parent_rri, false); +#if PG_VERSION_NUM >= 140000 /* reworked in 1375422c7826 */ + /* + * Call ExecInitRangeTable() should be first because in 14 it initializes + * field "estate->es_result_relations": + */ + ExecInitRangeTable(estate, range_table); + estate->es_result_relations = + (ResultRelInfo **) palloc0(list_length(range_table) * sizeof(ResultRelInfo *)); + estate->es_result_relations[0] = parent_rri; + /* + * Saving in the list allows to avoid needlessly traversing the whole + * array when only a few of its entries are possibly non-NULL. + */ + estate->es_opened_result_relations = + lappend(estate->es_opened_result_relations, parent_rri); + estate->es_result_relation_info = parent_rri; +#else estate->es_result_relations = parent_rri; estate->es_num_result_relations = 1; estate->es_result_relation_info = parent_rri; @@ -518,7 +554,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, #else estate->es_range_table = range_table; #endif - +#endif /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, parent_relid, parent_rri, @@ -669,8 +705,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* ... and create index entries for it */ if (child_rri->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuplesCompat(slot, &(tuple->t_self), - estate, false, NULL, NIL); + recheckIndexes = ExecInsertIndexTuplesCompat(estate->es_result_relation_info, + slot, &(tuple->t_self), estate, false, false, NULL, NIL); } #ifdef PG_SHARDMAN /* Handle foreign tables */ From 90e90e9912b9366e2ce89819c2c399edc7add39d Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 9 Nov 2021 00:47:23 +0300 Subject: [PATCH 460/528] Changes for PostgreSQL v15 --- src/hooks.c | 4 ++++ src/partition_creation.c | 49 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 276f6cfd..f376e4a0 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -293,7 +293,11 @@ pathman_join_pathlist_hook(PlannerInfo *root, * Currently we use get_parameterized_joinrel_size() since * it works just fine, but this might change some day. 
*/ +#if PG_VERSION_NUM >= 150000 /* reason: commit 18fea737b5e4 */ + nest_path->jpath.path.rows = +#else nest_path->path.rows = +#endif get_parameterized_joinrel_size_compat(root, joinrel, outer, inner, extra->sjinfo, diff --git a/src/partition_creation.c b/src/partition_creation.c index 65335c65..2154bc8a 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -92,8 +92,13 @@ static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_rel static Oid text_to_regprocedure(text *proname_args); static Constraint *make_constraint_common(char *name, Node *raw_expr); -static Value make_string_value_struct(char *str); +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +static String make_string_value_struct(char *str); +static Integer make_int_value_struct(int int_val); +#else +static Value make_string_value_struct(char* str); static Value make_int_value_struct(int int_val); +#endif static Node *build_partitioning_expression(Oid parent_relid, Oid *expr_type, @@ -1356,12 +1361,21 @@ build_raw_range_check_tree(Node *raw_expression, const Bound *end_value, Oid value_type) { +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#define BuildConstExpr(node, value, value_type) \ + do { \ + (node)->val.sval = make_string_value_struct( \ + datum_to_cstring((value), (value_type))); \ + (node)->location = -1; \ + } while (0) +#else #define BuildConstExpr(node, value, value_type) \ do { \ (node)->val = make_string_value_struct( \ datum_to_cstring((value), (value_type))); \ (node)->location = -1; \ } while (0) +#endif #define BuildCmpExpr(node, opname, expr, c) \ do { \ @@ -1554,11 +1568,19 @@ build_raw_hash_check_tree(Node *raw_expression, hash_proc = tce->hash_proc; /* Total amount of partitions */ +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ + part_count_c->val.ival = make_int_value_struct(part_count); +#else part_count_c->val = make_int_value_struct(part_count); +#endif part_count_c->location = -1; /* Index of this partition (hash % total amount) */ +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ + part_idx_c->val.ival = make_int_value_struct(part_idx); +#else part_idx_c->val = make_int_value_struct(part_idx); +#endif part_idx_c->location = -1; /* Call hash_proc() */ @@ -1649,6 +1671,29 @@ make_constraint_common(char *name, Node *raw_expr) return constraint; } +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +static String +make_string_value_struct(char* str) +{ + String val; + + val.type = T_String; + val.val = str; + + return val; +} + +static Integer +make_int_value_struct(int int_val) +{ + Integer val; + + val.type = T_Integer; + val.val = int_val; + + return val; +} +#else static Value make_string_value_struct(char *str) { @@ -1670,7 +1715,7 @@ make_int_value_struct(int int_val) return val; } - +#endif /* PG_VERSION_NUM >= 150000 */ /* * --------------------- From 23122aba6efd3feeee032636c89a100f9940d812 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 9 Nov 2021 14:43:27 +0300 Subject: [PATCH 461/528] Corrected test after REVOKE PUBLIC CREATE (see commit https://git.postgresql.org/gitweb/?p=postgresql.git&a=commitdiff&h=b073c3ccd06e4cb845e121387a43faa8c68a7b62) --- expected/pathman_CVE-2020-14350.out | 1 + sql/pathman_CVE-2020-14350.sql | 1 + 2 files changed, 2 insertions(+) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out index c91a280f..c4250097 100644 --- a/expected/pathman_CVE-2020-14350.out +++ 
b/expected/pathman_CVE-2020-14350.out @@ -9,6 +9,7 @@ DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; DROP ROLE IF EXISTS regress_hacker; SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; CREATE ROLE regress_hacker LOGIN; -- Test 1 diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql index 877f3280..e3730744 100644 --- a/sql/pathman_CVE-2020-14350.sql +++ b/sql/pathman_CVE-2020-14350.sql @@ -10,6 +10,7 @@ DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; DROP ROLE IF EXISTS regress_hacker; SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; CREATE ROLE regress_hacker LOGIN; From 6e155ce29feb1be64643ef44c8acac1e82bf7e83 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 18 Oct 2021 10:15:48 +0300 Subject: [PATCH 462/528] [PGPRO-5113] Added 'tuple map' for prevent addition extra columns values into partitions --- expected/pathman_rebuild_updates.out | 39 ++++++++ expected/pathman_rebuild_updates_1.out | 39 ++++++++ sql/pathman_rebuild_updates.sql | 19 ++++ src/include/partition_filter.h | 5 + src/partition_filter.c | 125 ++++++++++++++++++++++--- 5 files changed, 215 insertions(+), 12 deletions(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index eb078303..dfa4a5ce 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -155,6 +155,45 @@ UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::RE -1 | 105 | test_updates.test_13 (1 row) +/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out index 10ec256e..5bda15ce 100644 --- a/expected/pathman_rebuild_updates_1.out +++ b/expected/pathman_rebuild_updates_1.out @@ -155,6 +155,45 @@ UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::RE -1 | 105 | test_updates.test_13 (1 row) +/* basic check for 'ALTER TABLE ... 
ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index f4229d09..01757c2c 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -79,6 +79,25 @@ UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::RE UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; +/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; + +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; + DROP SCHEMA test_updates CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 233054b7..0c912abe 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -48,6 +48,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + TupleConversionMap *tuple_map_child; /* tuple mapping (child => child), for exclude 'ctid' */ PartRelationInfo *prel; /* this child might be a parent... */ ExprState *prel_expr_state; /* and have its own part. 
expression */ @@ -173,6 +174,10 @@ PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storag TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); +TupleConversionMap * build_part_tuple_map_child(Relation child_rel); + +void destroy_tuple_map(TupleConversionMap *tuple_map); + List * pfilter_build_tlist(Plan *subplan); diff --git a/src/partition_filter.c b/src/partition_filter.c index 5d1f4943..44f021c4 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -239,13 +239,9 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) } /* Free conversion-related stuff */ - if (rri_holder->tuple_map) - { - FreeTupleDesc(rri_holder->tuple_map->indesc); - FreeTupleDesc(rri_holder->tuple_map->outdesc); + destroy_tuple_map(rri_holder->tuple_map); - free_conversion_map(rri_holder->tuple_map); - } + destroy_tuple_map(rri_holder->tuple_map_child); /* Don't forget to close 'prel'! */ if (rri_holder->prel) @@ -381,6 +377,13 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); + /* + * Field for child->child tuple transformation map. We need to + * convert tuples because child TupleDesc might have extra + * columns ('ctid' etc.) and need remove them. + */ + rri_holder->tuple_map_child = NULL; + /* Default values */ rri_holder->prel = NULL; rri_holder->prel_expr_state = NULL; @@ -468,6 +471,73 @@ build_part_tuple_map(Relation base_rel, Relation child_rel) return tuple_map; } +/* + * Build tuple conversion map (e.g. partition tuple has extra column(s)). + * We create a special tuple map (tuple_map_child), which, when applied to the + * tuple of partition, translates the tuple attributes into the tuple + * attributes of the same partition, discarding service attributes like "ctid" + * (i.e. working like junkFilter). 
+ */ +TupleConversionMap * +build_part_tuple_map_child(Relation child_rel) +{ + TupleConversionMap *tuple_map; + TupleDesc child_tupdesc1; + TupleDesc child_tupdesc2; + int n; +#if PG_VERSION_NUM >= 130000 + AttrMap *attrMap; +#else + AttrNumber *attrMap; +#endif + + child_tupdesc1 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc1->tdtypeid = InvalidOid; + + child_tupdesc2 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc2->tdtypeid = InvalidOid; + + /* Generate tuple transformation map */ +#if PG_VERSION_NUM >= 130000 + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2); +#else + attrMap = convert_tuples_by_name_map(child_tupdesc1, child_tupdesc2, + ERR_PART_DESC_CONVERT); +#endif + + /* Prepare the map structure */ + tuple_map = (TupleConversionMap *) palloc(sizeof(TupleConversionMap)); + tuple_map->indesc = child_tupdesc1; + tuple_map->outdesc = child_tupdesc2; + tuple_map->attrMap = attrMap; + + /* preallocate workspace for Datum arrays */ + n = child_tupdesc1->natts; + tuple_map->outvalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->outisnull = (bool *) palloc(n * sizeof(bool)); + + n = child_tupdesc1->natts + 1; /* +1 for NULL */ + tuple_map->invalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->inisnull = (bool *) palloc(n * sizeof(bool)); + + tuple_map->invalues[0] = (Datum) 0; /* set up the NULL entry */ + tuple_map->inisnull[0] = true; + + return tuple_map; +} + +/* Destroy tuple conversion map */ +void +destroy_tuple_map(TupleConversionMap *tuple_map) +{ + if (tuple_map) + { + FreeTupleDesc(tuple_map->indesc); + FreeTupleDesc(tuple_map->outdesc); + + free_conversion_map(tuple_map); + } +} /* * ----------------------------------- @@ -812,6 +882,7 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; ResultRelInfo *rri; + JunkFilter *junkfilter = NULL; #if PG_VERSION_NUM >= 140000 PartitionRouterState *pr_state = linitial(node->custom_ps); @@ -827,6 +898,8 @@ partition_filter_exec(CustomScanState *node) */ reint_partition_filter_state(state, pr_state->current_rri); } +#else + junkfilter = estate->es_result_relation_info->ri_junkFilter; #endif /* Switch to per-tuple context */ @@ -844,18 +917,46 @@ partition_filter_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = rri; + /* + * Besides 'transform map' we should process two cases: + * 1) CMD_UPDATE, row moved to other partition, junkfilter == NULL + * (filled in router_set_slot() for SELECT + INSERT); + * we should clear attribute 'ctid' (do not insert it into database); + * 2) CMD_INSERT/CMD_UPDATE operations for partitions with deleted column(s), + * junkfilter == NULL. + */ /* If there's a transform map, rebuild the tuple */ - if (rri_holder->tuple_map) + if (rri_holder->tuple_map || + (!junkfilter && + (state->command_type == CMD_INSERT || state->command_type == CMD_UPDATE) && + (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */))) { - Relation child_rel = rri->ri_RelationDesc; - - /* xxx why old code decided to materialize it? */ #if PG_VERSION_NUM < 120000 HeapTuple htup_old, htup_new; +#endif + Relation child_rel = rri->ri_RelationDesc; + TupleConversionMap *tuple_map; + if (rri_holder->tuple_map) + tuple_map = rri_holder->tuple_map; + else + { + if (!rri_holder->tuple_map_child) + { /* + * Generate child->child tuple transformation map. 
We need to + * convert tuples because child TupleDesc has extra + * columns ('ctid' etc.) and need remove them. + */ + rri_holder->tuple_map_child = build_part_tuple_map_child(child_rel); + } + tuple_map = rri_holder->tuple_map_child; + } + + /* xxx why old code decided to materialize it? */ +#if PG_VERSION_NUM < 120000 htup_old = ExecMaterializeSlot(slot); - htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); + htup_new = do_convert_tuple(htup_old, tuple_map); ExecClearTuple(slot); #endif @@ -872,7 +973,7 @@ partition_filter_exec(CustomScanState *node) /* TODO: why should we *always* set a new slot descriptor? */ ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); #if PG_VERSION_NUM >= 120000 - slot = execute_attr_map_slot(rri_holder->tuple_map->attrMap, slot, state->tup_convert_slot); + slot = execute_attr_map_slot(tuple_map->attrMap, slot, state->tup_convert_slot); #else slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); #endif From c16f7468167df5cb7655ca46b192b61619ab0930 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 12 Nov 2021 02:31:41 +0300 Subject: [PATCH 463/528] [PGPRO-5113] Added extra conditions for using tuple map (v9.6 - v11) --- src/partition_filter.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 44f021c4..0ef84e61 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -929,7 +929,19 @@ partition_filter_exec(CustomScanState *node) if (rri_holder->tuple_map || (!junkfilter && (state->command_type == CMD_INSERT || state->command_type == CMD_UPDATE) && - (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */))) + (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */ +#if PG_VERSION_NUM < 120000 + /* + * If we have a regular physical tuple 'slot->tts_tuple' and + * it's locally palloc'd => we will use this tuple in + * ExecMaterializeSlot() instead of materialize the slot, so + * need to check number of attributes for this tuple: + */ + || (slot->tts_tuple && slot->tts_shouldFree && + HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) > + rri->ri_RelationDesc->rd_att->natts /* extra fields */) +#endif + ))) { #if PG_VERSION_NUM < 120000 HeapTuple htup_old, From e4faa9030c99a08ca587e036239b30ef9ca888c4 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Sun, 31 Oct 2021 14:51:35 +0300 Subject: [PATCH 464/528] Changed some regression tests + results 1) 'DROP SCHEMA ... 
CASCADE' replaced to 'DROP SCHEMA ...'; 2) pathman_column_type.sql: from results removed row 'partition status cache'; 3) pathman_mergejoin.sql: added GUC's for fixate strategy of queries --- expected/pathman_array_qual.out | 5 ++-- expected/pathman_array_qual_1.out | 5 ++-- expected/pathman_basic.out | 21 ++++++++++++++--- expected/pathman_basic_1.out | 21 ++++++++++++++--- expected/pathman_basic_2.out | 21 ++++++++++++++--- expected/pathman_bgw.out | 2 +- expected/pathman_calamity.out | 16 +++++++++---- expected/pathman_calamity_1.out | 16 +++++++++---- expected/pathman_calamity_2.out | 16 +++++++++---- expected/pathman_calamity_3.out | 16 +++++++++---- expected/pathman_callbacks.out | 6 +++-- expected/pathman_column_type.out | 32 +++++++++++++------------- expected/pathman_column_type_1.out | 32 +++++++++++++------------- expected/pathman_cte.out | 5 ++-- expected/pathman_cte_1.out | 5 ++-- expected/pathman_cte_2.out | 5 ++-- expected/pathman_declarative.out | 8 ++++--- expected/pathman_declarative_1.out | 8 ++++--- expected/pathman_domains.out | 6 +++-- expected/pathman_dropped_cols.out | 2 +- expected/pathman_expressions.out | 10 ++++++-- expected/pathman_expressions_1.out | 10 ++++++-- expected/pathman_expressions_2.out | 10 ++++++-- expected/pathman_foreign_keys.out | 5 ++-- expected/pathman_gaps.out | 11 +++++++-- expected/pathman_gaps_1.out | 11 +++++++-- expected/pathman_hashjoin.out | 9 +++++--- expected/pathman_hashjoin_1.out | 9 +++++--- expected/pathman_hashjoin_2.out | 9 +++++--- expected/pathman_hashjoin_3.out | 9 +++++--- expected/pathman_hashjoin_4.out | 9 +++++--- expected/pathman_hashjoin_5.out | 9 +++++--- expected/pathman_inserts.out | 8 +++++-- expected/pathman_inserts_1.out | 8 +++++-- expected/pathman_inserts_2.out | 8 +++++-- expected/pathman_interval.out | 2 +- expected/pathman_join_clause.out | 12 +++++++--- expected/pathman_join_clause_1.out | 12 +++++++--- expected/pathman_join_clause_2.out | 12 +++++++--- expected/pathman_lateral.out | 5 ++-- expected/pathman_lateral_1.out | 5 ++-- expected/pathman_lateral_2.out | 5 ++-- expected/pathman_lateral_3.out | 5 ++-- expected/pathman_mergejoin.out | 12 +++++++--- expected/pathman_mergejoin_1.out | 12 +++++++--- expected/pathman_mergejoin_2.out | 12 +++++++--- expected/pathman_mergejoin_3.out | 12 +++++++--- expected/pathman_mergejoin_4.out | 12 +++++++--- expected/pathman_mergejoin_5.out | 12 +++++++--- expected/pathman_only.out | 5 ++-- expected/pathman_only_1.out | 5 ++-- expected/pathman_only_2.out | 5 ++-- expected/pathman_param_upd_del.out | 5 ++-- expected/pathman_permissions.out | 2 +- expected/pathman_rebuild_deletes.out | 5 ++-- expected/pathman_rebuild_deletes_1.out | 5 ++-- expected/pathman_rebuild_updates.out | 5 ++-- expected/pathman_rebuild_updates_1.out | 5 ++-- expected/pathman_rowmarks.out | 10 ++++---- expected/pathman_rowmarks_1.out | 10 ++++---- expected/pathman_rowmarks_2.out | 10 ++++---- expected/pathman_rowmarks_3.out | 10 ++++---- expected/pathman_runtime_nodes.out | 24 ++++++++++++++++--- expected/pathman_subpartitions.out | 4 ++-- expected/pathman_subpartitions_1.out | 4 ++-- expected/pathman_upd_del.out | 10 +++++--- expected/pathman_upd_del_1.out | 10 +++++--- expected/pathman_upd_del_2.out | 10 +++++--- expected/pathman_update_node.out | 7 ++++-- expected/pathman_update_triggers.out | 6 +++-- expected/pathman_utility_stmt.out | 27 +++++++++++++++------- expected/pathman_views.out | 7 ++++-- expected/pathman_views_1.out | 7 ++++-- expected/pathman_views_2.out | 7 ++++-- 
expected/pathman_views_3.out | 7 ++++-- sql/pathman_array_qual.sql | 3 ++- sql/pathman_basic.sql | 15 ++++++++++-- sql/pathman_bgw.sql | 2 +- sql/pathman_calamity.sql | 11 ++++++--- sql/pathman_callbacks.sql | 5 +++- sql/pathman_column_type.sql | 17 +++++++++----- sql/pathman_cte.sql | 3 ++- sql/pathman_declarative.sql | 6 +++-- sql/pathman_domains.sql | 4 +++- sql/pathman_dropped_cols.sql | 2 +- sql/pathman_expressions.sql | 6 ++++- sql/pathman_foreign_keys.sql | 4 +++- sql/pathman_gaps.sql | 6 ++++- sql/pathman_hashjoin.sql | 6 +++-- sql/pathman_inserts.sql | 6 ++++- sql/pathman_interval.sql | 2 +- sql/pathman_join_clause.sql | 9 ++++++-- sql/pathman_lateral.sql | 3 ++- sql/pathman_mergejoin.sql | 10 ++++++-- sql/pathman_only.sql | 3 ++- sql/pathman_param_upd_del.sql | 3 ++- sql/pathman_permissions.sql | 2 +- sql/pathman_rebuild_deletes.sql | 3 ++- sql/pathman_rebuild_updates.sql | 3 ++- sql/pathman_rowmarks.sql | 4 +++- sql/pathman_runtime_nodes.sql | 19 +++++++++++++-- sql/pathman_subpartitions.sql | 3 ++- sql/pathman_upd_del.sql | 7 ++++-- sql/pathman_update_node.sql | 4 +++- sql/pathman_update_triggers.sql | 4 +++- sql/pathman_utility_stmt.sql | 18 +++++++++++---- sql/pathman_views.sql | 5 +++- 107 files changed, 649 insertions(+), 274 deletions(-) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 49dca03a..0587a1c8 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -2402,6 +2402,7 @@ EXECUTE q(100); (1 row) DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_1.out b/expected/pathman_array_qual_1.out index 6c8def94..dd7d2485 100644 --- a/expected/pathman_array_qual_1.out +++ b/expected/pathman_array_qual_1.out @@ -2392,6 +2392,7 @@ EXECUTE q(100); (1 row) DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 4117a00c..3afde299 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1830,7 +1830,22 @@ SELECT * FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; ERROR: could not expand partitioned table "mixinh_child1" -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 32 other objects +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out index 702f9027..92a86727 100644 --- a/expected/pathman_basic_1.out +++ b/expected/pathman_basic_1.out @@ -1813,7 +1813,22 @@ SELECT * 
FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; ERROR: could not expand partitioned table "mixinh_child1" -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 32 other objects +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out index 28e46c14..7cfde8a6 100644 --- a/expected/pathman_basic_2.out +++ b/expected/pathman_basic_2.out @@ -1813,7 +1813,22 @@ SELECT * FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; ERROR: could not expand partitioned table "mixinh_child1" -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 32 other objects +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 5d5d2b21..4f2ad6b8 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -242,5 +242,5 @@ SELECT count(*) FROM test_bgw.conc_part; DROP TABLE test_bgw.conc_part CASCADE; NOTICE: drop cascades to 5 other objects -DROP SCHEMA test_bgw CASCADE; +DROP SCHEMA test_bgw; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index d8b6ad96..7226e7b9 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -779,8 +779,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -987,7 +995,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA 
calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1060,5 +1068,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 2b0f98e5..62050cfd 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -779,8 +779,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -987,7 +995,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1060,5 +1068,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index b6fafc83..f647e788 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -779,8 +779,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -987,7 +995,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1060,5 +1068,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out index 9aec9765..f64a5f8b 100644 --- a/expected/pathman_calamity_3.out +++ b/expected/pathman_calamity_3.out @@ -783,8 +783,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE 
calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -991,7 +999,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1064,5 +1072,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 3eea2049..8427dae7 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -411,6 +411,8 @@ ORDER BY range_min::INT4; DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 5 other objects -DROP SCHEMA callbacks CASCADE; -NOTICE: drop cascades to 2 other objects +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 4e2f3ff6..c77acbb2 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -23,14 +23,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* * Get parsed and analyzed expression. 
@@ -84,14 +84,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -135,14 +135,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; @@ -153,14 +153,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; @@ -170,14 +170,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -199,5 +199,5 @@ NOTICE: 0 rows copied from test_column_type.test_4 (1 row) DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_1.out b/expected/pathman_column_type_1.out index d169719d..06b61387 100644 --- a/expected/pathman_column_type_1.out +++ b/expected/pathman_column_type_1.out @@ -23,14 +23,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* * Get parsed and analyzed expression. 
@@ -84,14 +84,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -135,14 +135,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; @@ -153,14 +153,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; @@ -170,14 +170,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -199,5 +199,5 @@ NOTICE: 0 rows copied from test_column_type.test_4 (1 row) DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out index ce818a36..33821ac0 100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -271,6 +271,7 @@ SELECT * FROM test; (4 rows) -DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_1.out b/expected/pathman_cte_1.out index 70a9ee88..5e30e188 100644 --- a/expected/pathman_cte_1.out +++ b/expected/pathman_cte_1.out @@ -260,6 +260,7 @@ SELECT * FROM test; (4 rows) -DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out index 455a7cad..6b64ad42 100644 --- a/expected/pathman_cte_2.out +++ b/expected/pathman_cte_2.out @@ -247,6 +247,7 @@ SELECT * FROM test; (4 rows) -DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects 
+DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index 01f924ae..2915ecfb 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -99,7 +99,9 @@ ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 8 other objects +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out index 9870a3e7..dede4941 100644 --- a/expected/pathman_declarative_1.out +++ b/expected/pathman_declarative_1.out @@ -99,7 +99,9 @@ ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 8 other objects +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index 41c8bfbb..cc32ce0c 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -124,6 +124,8 @@ ORDER BY "partition"::TEXT; domains.dom_table | domains.dom_table_4 | 1 | val | | (5 rows) -DROP SCHEMA domains CASCADE; -NOTICE: drop cascades to 7 other objects +DROP TABLE domains.dom_table CASCADE; +NOTICE: drop cascades to 5 other objects +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 220f6750..826931d3 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -205,5 +205,5 @@ EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA dropped_cols CASCADE; +DROP SCHEMA dropped_cols; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 1db38acb..cd629b8e 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -430,6 +430,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 24 other objects +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out index 126534a0..66e3ea75 100644 --- a/expected/pathman_expressions_1.out +++ 
b/expected/pathman_expressions_1.out @@ -434,6 +434,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 24 other objects +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_2.out b/expected/pathman_expressions_2.out index 83b0c7b0..89bf24ef 100644 --- a/expected/pathman_expressions_2.out +++ b/expected/pathman_expressions_2.out @@ -425,6 +425,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (2 rows) -DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 24 other objects +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 2ff12279..34fc75ad 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -90,6 +90,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; DROP TABLE fkeys.messages, fkeys.replies CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA fkeys CASCADE; -NOTICE: drop cascades to 2 other objects +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out index 1d9b1f33..530beca9 100644 --- a/expected/pathman_gaps.out +++ b/expected/pathman_gaps.out @@ -822,6 +822,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; -> Seq Scan on test_4_11 (7 rows) -DROP SCHEMA gaps CASCADE; -NOTICE: drop cascades to 30 other objects +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_1.out b/expected/pathman_gaps_1.out index d6e1973d..b1c0ac34 100644 --- a/expected/pathman_gaps_1.out +++ b/expected/pathman_gaps_1.out @@ -807,6 +807,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; -> Seq Scan on test_4_11 (7 rows) -DROP SCHEMA gaps CASCADE; -NOTICE: drop cascades to 30 other objects +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out index 
779efe3d..f5ebabdd 100644 --- a/expected/pathman_hashjoin.out +++ b/expected/pathman_hashjoin.out @@ -75,7 +75,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 (20 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out index ae1edda6..df6c0174 100644 --- a/expected/pathman_hashjoin_1.out +++ b/expected/pathman_hashjoin_1.out @@ -75,7 +75,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 (20 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out index 21cd1883..69ea5762 100644 --- a/expected/pathman_hashjoin_2.out +++ b/expected/pathman_hashjoin_2.out @@ -68,7 +68,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Filter: (id IS NOT NULL) (13 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out index 106e8c0e..e2c8903a 100644 --- a/expected/pathman_hashjoin_3.out +++ b/expected/pathman_hashjoin_3.out @@ -67,7 +67,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Filter: (id IS NOT NULL) (12 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out index ad4b5651..ef8dfc29 100644 --- a/expected/pathman_hashjoin_4.out +++ b/expected/pathman_hashjoin_4.out @@ -75,7 +75,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_3 (20 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_5.out b/expected/pathman_hashjoin_5.out index 7bbea061..a8f3b6e7 100644 --- a/expected/pathman_hashjoin_5.out +++ b/expected/pathman_hashjoin_5.out @@ -67,7 +67,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Filter: (id IS NOT NULL) (12 rows) 
-DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 225604c5..16656f18 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -1066,6 +1066,10 @@ SELECT count(*) FROM test_inserts.special_2; DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; NOTICE: drop cascades to 4 other objects -DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index a6634edd..3479c12d 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -1066,6 +1066,10 @@ SELECT count(*) FROM test_inserts.special_2; DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; NOTICE: drop cascades to 4 other objects -DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out index 9a439010..91f05753 100644 --- a/expected/pathman_inserts_2.out +++ b/expected/pathman_inserts_2.out @@ -1066,6 +1066,10 @@ SELECT count(*) FROM test_inserts.special_2; DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; NOTICE: drop cascades to 4 other objects -DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 72dc4e01..e4741522 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -271,5 +271,5 @@ SELECT set_interval('test_interval.abc', NULL::INTEGER); ERROR: table "test_interval.abc" is not partitioned by RANGE DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_interval CASCADE; +DROP SCHEMA test_interval; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index ed822543..7654d4ca 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -171,7 +171,13 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); 4 | 3 | | (2 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE 
test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_1.out b/expected/pathman_join_clause_1.out index 09b9a00c..d65131c7 100644 --- a/expected/pathman_join_clause_1.out +++ b/expected/pathman_join_clause_1.out @@ -170,7 +170,13 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); 4 | 3 | | (2 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out index d58ff6f6..a1fae839 100644 --- a/expected/pathman_join_clause_2.out +++ b/expected/pathman_join_clause_2.out @@ -149,7 +149,13 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); 4 | 3 | | (2 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index 0cb1a864..53edc3d2 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -122,6 +122,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_1.out b/expected/pathman_lateral_1.out index 1dc67fe2..12995290 100644 --- a/expected/pathman_lateral_1.out +++ b/expected/pathman_lateral_1.out @@ -116,6 +116,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out index 5ee4104c..df5292f8 100644 --- a/expected/pathman_lateral_2.out +++ b/expected/pathman_lateral_2.out @@ -122,6 +122,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_3.out b/expected/pathman_lateral_3.out index dd64819d..4bc385de 100644 --- a/expected/pathman_lateral_3.out +++ b/expected/pathman_lateral_3.out @@ -121,6 +121,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; 
+NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out index ca3a3d9d..d8a14371 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -80,7 +82,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out index 31da465a..bcd6c272 100644 --- a/expected/pathman_mergejoin_1.out +++ b/expected/pathman_mergejoin_1.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -78,7 +80,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out index 4b614ad6..aed697d2 100644 --- a/expected/pathman_mergejoin_2.out +++ b/expected/pathman_mergejoin_2.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -71,7 +73,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out index 7003205f..85414544 100644 --- a/expected/pathman_mergejoin_3.out +++ b/expected/pathman_mergejoin_3.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN 
(COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -69,7 +71,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out index 185aa3d1..e2affa74 100644 --- a/expected/pathman_mergejoin_4.out +++ b/expected/pathman_mergejoin_4.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -78,7 +80,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out index 6ffe89cd..7b607435 100644 --- a/expected/pathman_mergejoin_5.out +++ b/expected/pathman_mergejoin_5.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -69,7 +71,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_only.out b/expected/pathman_only.out index 83425632..1b9f6a6b 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -272,6 +272,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) (27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out index da913e54..b92a8eaf 100644 --- a/expected/pathman_only_1.out +++ b/expected/pathman_only_1.out @@ -275,6 +275,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) (27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; 
DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out index 39b8f199..63638012 100644 --- a/expected/pathman_only_2.out +++ b/expected/pathman_only_2.out @@ -275,6 +275,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) (27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_param_upd_del.out b/expected/pathman_param_upd_del.out index ad935579..28fa616d 100644 --- a/expected/pathman_param_upd_del.out +++ b/expected/pathman_param_upd_del.out @@ -185,6 +185,7 @@ EXPLAIN (COSTS OFF) EXECUTE del(11); (3 rows) DEALLOCATE del; -DROP SCHEMA param_upd_del CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE param_upd_del.test CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA param_upd_del; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index d03588c7..04b1112d 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -259,5 +259,5 @@ DROP OWNED BY user1; DROP OWNED BY user2; DROP USER user1; DROP USER user2; -DROP SCHEMA permissions CASCADE; +DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes.out b/expected/pathman_rebuild_deletes.out index b19d700a..a5edc242 100644 --- a/expected/pathman_rebuild_deletes.out +++ b/expected/pathman_rebuild_deletes.out @@ -100,6 +100,7 @@ RETURNING *, tableoid::REGCLASS; (3 rows) DROP TABLE test_deletes.test_dummy; -DROP SCHEMA test_deletes CASCADE; -NOTICE: drop cascades to 13 other objects +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes_1.out b/expected/pathman_rebuild_deletes_1.out index d1c4b69e..eb2f5001 100644 --- a/expected/pathman_rebuild_deletes_1.out +++ b/expected/pathman_rebuild_deletes_1.out @@ -100,6 +100,7 @@ RETURNING *, tableoid::REGCLASS; (3 rows) DROP TABLE test_deletes.test_dummy; -DROP SCHEMA test_deletes CASCADE; -NOTICE: drop cascades to 13 other objects +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index dfa4a5ce..40c5b048 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -194,6 +194,7 @@ select * from test_updates.test_5113 where val = 11; drop table test_updates.test_5113 cascade; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out index 5bda15ce..57b3297a 100644 --- a/expected/pathman_rebuild_updates_1.out +++ b/expected/pathman_rebuild_updates_1.out @@ -194,6 +194,7 @@ select * from test_updates.test_5113 where val = 11; drop table test_updates.test_5113 cascade; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP 
SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index f9ef8114..ea047c9e 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -381,13 +381,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index e0877333..256b8637 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -436,13 +436,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out index 7436b081..06fb88ac 100644 --- a/expected/pathman_rowmarks_2.out +++ b/expected/pathman_rowmarks_2.out @@ -378,13 +378,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out index 6179ff94..c2539d76 100644 --- a/expected/pathman_rowmarks_3.out +++ b/expected/pathman_rowmarks_3.out @@ -378,13 +378,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to 
table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index f364cfb4..17905e59 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -444,7 +444,25 @@ where id = any (select generate_series(-10, -1)); /* should be empty */ set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 37 other objects +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 25b36492..3a6a19eb 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -462,6 +462,6 @@ SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDAT DROP TABLE subpartitions.a2 CASCADE; NOTICE: drop cascades to 4 other objects DROP TABLE subpartitions.a1; -DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out index 5ea33044..d620cde9 100644 --- a/expected/pathman_subpartitions_1.out +++ b/expected/pathman_subpartitions_1.out @@ -456,6 +456,6 @@ SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDAT DROP TABLE subpartitions.a2 CASCADE; NOTICE: drop cascades to 4 other objects DROP TABLE subpartitions.a1; -DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 2cc19239..44bb34fc 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -460,7 +460,11 @@ WITH q AS (SELECT id FROM test.tmp2 WHERE id < 3) DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); ROLLBACK; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 27 other objects +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index 5cd5ac9f..0a7e91e9 100644 --- a/expected/pathman_upd_del_1.out +++ 
b/expected/pathman_upd_del_1.out @@ -460,7 +460,11 @@ WITH q AS (SELECT id FROM test.tmp2 WHERE id < 3) DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); ROLLBACK; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 27 other objects +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out index 2aeb6702..80325d7e 100644 --- a/expected/pathman_upd_del_2.out +++ b/expected/pathman_upd_del_2.out @@ -452,7 +452,11 @@ WITH q AS (SELECT id FROM test.tmp2 WHERE id < 3) DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); ROLLBACK; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 27 other objects +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 120b42c4..9fc1d07f 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -446,6 +446,9 @@ SELECT count(*) FROM test_update_node.test_hash; 10 (1 row) -DROP SCHEMA test_update_node CASCADE; -NOTICE: drop cascades to 17 other objects +DROP TABLE test_update_node.test_hash CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_update_node; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out index d5c92b9f..40c6a19c 100644 --- a/expected/pathman_update_triggers.out +++ b/expected/pathman_update_triggers.out @@ -184,6 +184,8 @@ select count(distinct val) from test_update_triggers.test; 1 (1 row) -DROP SCHEMA test_update_triggers CASCADE; -NOTICE: drop cascades to 4 other objects +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 7e59fa23..1a8b969e 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -214,8 +214,11 @@ SELECT COUNT(*) FROM copy_stmt_hooking.test2; 1 (1 row) -DROP SCHEMA copy_stmt_hooking CASCADE; -NOTICE: drop cascades to 797 other objects +DROP TABLE copy_stmt_hooking.test CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE copy_stmt_hooking.test2 CASCADE; +NOTICE: drop cascades to 790 other objects +DROP SCHEMA copy_stmt_hooking; /* * Test auto check constraint renaming */ @@ -353,8 +356,15 @@ WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; pathman_plain_test_renamed_check | CHECK (a < 100) (1 row) -DROP SCHEMA rename CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +NOTICE: drop cascades to table rename.test_inh_one +DROP TABLE rename.parent CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE rename.test CASCADE; +NOTICE: drop cascades to 3 other objects +DROP FUNCTION add_constraint(regclass); +DROP 
SCHEMA rename; /* * Test DROP INDEX CONCURRENTLY (test snapshots) */ @@ -368,8 +378,9 @@ SELECT create_hash_partitions('drop_index.test', 'val', 2); (1 row) DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; -DROP SCHEMA drop_index CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE drop_index.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA drop_index; /* * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla */ @@ -426,12 +437,12 @@ ERROR: schema "nonexistent_schema" does not exist CREATE SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; DROP TABLE test_nonexistance2.existent_table; -DROP SCHEMA test_nonexistance2 CASCADE; +DROP SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; NOTICE: relation "nonexistent_table" does not exist, skipping CREATE TABLE test_nonexistance.existent_table(i INT4); ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; ERROR: tablespace "nonexistent_tablespace" does not exist DROP TABLE test_nonexistance.existent_table; -DROP SCHEMA test_nonexistance CASCADE; +DROP SCHEMA test_nonexistance; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 78589970..64b8425d 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -186,6 +186,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index ea390d84..e6bb45f5 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -242,6 +242,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_2.out b/expected/pathman_views_2.out index 15770ec0..45ea3eb4 100644 --- a/expected/pathman_views_2.out +++ b/expected/pathman_views_2.out @@ -183,6 +183,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out index 09b5718f..cf5ca58e 100644 --- a/expected/pathman_views_3.out +++ b/expected/pathman_views_3.out @@ -184,6 +184,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP 
EXTENSION pg_pathman; diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 84327359..9f1b0c1e 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -427,5 +427,6 @@ DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; +DROP TABLE array_qual.test CASCADE; +DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 403424f5..478935c5 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -563,6 +563,17 @@ INSERT INTO test.mixinh_child1 VALUES (1); SELECT * FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; -DROP SCHEMA test CASCADE; +DROP TABLE test.hash_rel CASCADE; +DROP TABLE test.index_on_childs CASCADE; +DROP TABLE test.mixinh_child1 CASCADE; +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index 28f922e6..74239e99 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -145,5 +145,5 @@ DROP TABLE test_bgw.conc_part CASCADE; -DROP SCHEMA test_bgw CASCADE; +DROP SCHEMA test_bgw; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 6ad0df0e..ecc2c30f 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -345,7 +345,12 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; -DROP SCHEMA calamity CASCADE; +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +DROP TABLE calamity.part_ok CASCADE; +DROP TABLE calamity.hash_two_times CASCADE; +DROP TABLE calamity.to_be_disabled CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; @@ -428,7 +433,7 @@ DROP TABLE calamity.test_pathman_cache_stats CASCADE; SELECT context, entries FROM pathman_cache_stats WHERE context != 'partition status cache' ORDER BY context; /* OK */ -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; @@ -467,5 +472,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 65b729d9..096a55ad 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -144,5 +144,8 @@ ORDER BY range_min::INT4; DROP TABLE callbacks.abc CASCADE; -DROP SCHEMA callbacks CASCADE; +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 685643fd..d3f16107 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -20,7 +20,8 @@ SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); /* make sure that bounds and dispatch info has been cached */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status 
cache' ORDER BY context; /* * Get parsed and analyzed expression. @@ -45,7 +46,8 @@ DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -65,21 +67,24 @@ SELECT create_hash_partitions('test_column_type.test', 'id', 5); /* make sure that bounds and dispatch info has been cached */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -89,5 +94,5 @@ SELECT drop_partitions('test_column_type.test'); DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cte.sql b/sql/pathman_cte.sql index 5a695cbb..594c6db7 100644 --- a/sql/pathman_cte.sql +++ b/sql/pathman_cte.sql @@ -157,5 +157,6 @@ SELECT * FROM test; -DROP SCHEMA test_cte CASCADE; +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index d89ce3ed..eb12c295 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -43,6 +43,8 @@ CREATE TABLE test.r4 PARTITION OF test.range_rel ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; -DROP SCHEMA test CASCADE; +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql index 4793c6f8..105b2399 100644 --- a/sql/pathman_domains.sql +++ b/sql/pathman_domains.sql @@ -41,5 +41,7 @@ SELECT * FROM pathman_partition_list ORDER BY "partition"::TEXT; -DROP SCHEMA domains CASCADE; +DROP TABLE domains.dom_table CASCADE; +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index cb6acc57..2a128df2 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -100,5 +100,5 @@ EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; -DROP SCHEMA dropped_cols CASCADE; +DROP SCHEMA dropped_cols; DROP EXTENSION pg_pathman; diff 
--git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index ed05be79..bf29f896 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -178,5 +178,9 @@ INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('as SELECT COUNT(*) FROM test_exprs.range_rel_6; EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; -DROP SCHEMA test_exprs CASCADE; +DROP TABLE test_exprs.canary CASCADE; +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +DROP TABLE test_exprs.hash_rel CASCADE; +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_foreign_keys.sql b/sql/pathman_foreign_keys.sql index 1ec1b766..74dee25f 100644 --- a/sql/pathman_foreign_keys.sql +++ b/sql/pathman_foreign_keys.sql @@ -52,5 +52,7 @@ DROP TABLE fkeys.messages, fkeys.replies CASCADE; -DROP SCHEMA fkeys CASCADE; +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql index 55c9a16d..129b210c 100644 --- a/sql/pathman_gaps.sql +++ b/sql/pathman_gaps.sql @@ -137,5 +137,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; -DROP SCHEMA gaps CASCADE; +DROP TABLE gaps.test_1 CASCADE; +DROP TABLE gaps.test_2 CASCADE; +DROP TABLE gaps.test_3 CASCADE; +DROP TABLE gaps.test_4 CASCADE; +DROP SCHEMA gaps; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index 2c3654d4..620dee5f 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -49,6 +49,8 @@ JOIN test.range_rel j2 on j2.id = j1.id JOIN test.num_range_rel j3 on j3.id = j1.id WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -DROP SCHEMA test CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index c8c6439d..aa5b6c1c 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -223,5 +223,9 @@ DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; -DROP SCHEMA test_inserts CASCADE; +DROP TABLE test_inserts.storage CASCADE; +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index f2933ab0..3a457e7a 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -168,5 +168,5 @@ DROP TABLE test_interval.abc CASCADE; -DROP SCHEMA test_interval CASCADE; +DROP SCHEMA test_interval; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index 3a0a655f..aa30b0b8 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -106,6 +106,11 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); -DROP SCHEMA test CASCADE; +DROP TABLE test.child CASCADE; +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql index d287c051..d5def38c 
100644 --- a/sql/pathman_lateral.sql +++ b/sql/pathman_lateral.sql @@ -45,5 +45,6 @@ set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; +DROP TABLE test_lateral.data CASCADE; +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 05de4ba2..d1084375 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -48,6 +48,9 @@ SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -56,7 +59,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; +SET enable_seqscan = ON; -DROP SCHEMA test CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index 53ef6a9a..88f4e88a 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -74,5 +74,6 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test -DROP SCHEMA test_only CASCADE; +DROP TABLE test_only.from_only_test CASCADE; +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_param_upd_del.sql b/sql/pathman_param_upd_del.sql index f4e42a41..0f3030e7 100644 --- a/sql/pathman_param_upd_del.sql +++ b/sql/pathman_param_upd_del.sql @@ -45,5 +45,6 @@ EXPLAIN (COSTS OFF) EXECUTE del(11); DEALLOCATE del; -DROP SCHEMA param_upd_del CASCADE; +DROP TABLE param_upd_del.test CASCADE; +DROP SCHEMA param_upd_del; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 3a234676..49e1fc18 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -174,5 +174,5 @@ DROP USER user1; DROP USER user2; -DROP SCHEMA permissions CASCADE; +DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_deletes.sql b/sql/pathman_rebuild_deletes.sql index 28a09916..1af6b61a 100644 --- a/sql/pathman_rebuild_deletes.sql +++ b/sql/pathman_rebuild_deletes.sql @@ -60,5 +60,6 @@ DROP TABLE test_deletes.test_dummy; -DROP SCHEMA test_deletes CASCADE; +DROP TABLE test_deletes.test CASCADE; +DROP SCHEMA test_deletes; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index 01757c2c..fbbbcbba 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -99,5 +99,6 @@ select * from test_updates.test_5113 where val = 11; drop table test_updates.test_5113 cascade; -DROP SCHEMA test_updates CASCADE; +DROP TABLE test_updates.test CASCADE; +DROP SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index ab7f24ac..bb7719ea 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -135,5 +135,7 @@ SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; +DROP TABLE rowmarks.first CASCADE; +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index e0b50e9b..81c046db 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -331,7 +331,22 @@ set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test CASCADE; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category 
CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +DROP TABLE test.runtime_test_2 CASCADE; +DROP TABLE test.runtime_test_3 CASCADE; +DROP TABLE test.runtime_test_4 CASCADE; +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 7a4dc606..5515874c 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -164,5 +164,6 @@ DROP TABLE subpartitions.a2 CASCADE; DROP TABLE subpartitions.a1; -DROP SCHEMA subpartitions CASCADE; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index a6cab581..a034c14a 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -274,6 +274,9 @@ ROLLBACK; -DROP SCHEMA test CASCADE; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index 2c7e97f7..e70f60f4 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -214,5 +214,7 @@ SELECT count(*) FROM test_update_node.test_hash; -DROP SCHEMA test_update_node CASCADE; +DROP TABLE test_update_node.test_hash CASCADE; +DROP TABLE test_update_node.test_range CASCADE; +DROP SCHEMA test_update_node; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql index e8405acb..646afe65 100644 --- a/sql/pathman_update_triggers.sql +++ b/sql/pathman_update_triggers.sql @@ -140,5 +140,7 @@ update test_update_triggers.test set val = val + 1 returning *, tableoid::regcla select count(distinct val) from test_update_triggers.test; -DROP SCHEMA test_update_triggers CASCADE; +DROP TABLE test_update_triggers.test CASCADE; +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 3b99a2f3..08992835 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -154,7 +154,9 @@ COPY copy_stmt_hooking.test2(t) FROM stdin; \. 
SELECT COUNT(*) FROM copy_stmt_hooking.test2; -DROP SCHEMA copy_stmt_hooking CASCADE; +DROP TABLE copy_stmt_hooking.test CASCADE; +DROP TABLE copy_stmt_hooking.test2 CASCADE; +DROP SCHEMA copy_stmt_hooking; @@ -234,7 +236,12 @@ FROM pg_constraint r WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; -DROP SCHEMA rename CASCADE; +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +DROP TABLE rename.parent CASCADE; +DROP TABLE rename.test CASCADE; +DROP FUNCTION add_constraint(regclass); +DROP SCHEMA rename; @@ -248,7 +255,8 @@ CREATE INDEX ON drop_index.test (val); SELECT create_hash_partitions('drop_index.test', 'val', 2); DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; -DROP SCHEMA drop_index CASCADE; +DROP TABLE drop_index.test CASCADE; +DROP SCHEMA drop_index; /* * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla @@ -288,14 +296,14 @@ ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_sc CREATE SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; DROP TABLE test_nonexistance2.existent_table; -DROP SCHEMA test_nonexistance2 CASCADE; +DROP SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; CREATE TABLE test_nonexistance.existent_table(i INT4); ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; DROP TABLE test_nonexistance.existent_table; -DROP SCHEMA test_nonexistance CASCADE; +DROP SCHEMA test_nonexistance; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql index 65e64149..36baa5c5 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ -79,5 +79,8 @@ explain (costs off) select * from views.abc_union_all where id = 5; -DROP SCHEMA views CASCADE; +DROP TABLE views._abc CASCADE; +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; From a5356299da96093cd2afe2623629b74640759d20 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 15 Oct 2021 14:05:11 +0300 Subject: [PATCH 465/528] [PGPRO-5614] Reset cache at start and at finish ATX transaction --- src/pg_pathman.c | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index f06e794e..24b22eb2 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -4,7 +4,7 @@ * This module sets planner hooks, handles SELECT queries and produces * paths for partitioned tables * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2021, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -281,6 +281,32 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) else return 1.0; } +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 +/* + * Reset cache at start and at finish ATX transaction + */ +static void +pathman_xact_cb(XactEvent event, void *arg) +{ + if (getNestLevelATX() > 0) + { + /* + * For each ATX transaction start/finish: need to reset pg_pathman + * cache because we shouldn't see uncommitted data in autonomous + * transaction and data of autonomous transaction in main transaction + */ + if ((event == XACT_EVENT_START /* start */) || + (event == XACT_EVENT_ABORT || + event == XACT_EVENT_PARALLEL_ABORT || + event == XACT_EVENT_COMMIT || + event == 
XACT_EVENT_PARALLEL_COMMIT || + event == XACT_EVENT_PREPARE /* finish */)) + { + pathman_relcache_hook(PointerGetDatum(NULL), InvalidOid); + } + } +} +#endif /* * ------------------- @@ -330,6 +356,11 @@ _PG_init(void) init_partition_filter_static_data(); init_partition_router_static_data(); init_partition_overseer_static_data(); + +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 + /* Callbacks for reload relcache for ATX transactions */ + RegisterXactCallback(pathman_xact_cb, NULL); +#endif } /* Get cached PATHMAN_CONFIG relation Oid */ Oid get_pathman_config_relid(bool invalid_is_ok) From 8ffc7224187cc59e91d99f6edeaa19982f9372b7 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 6 Dec 2021 15:30:36 +0300 Subject: [PATCH 466/528] [PGPRO-5902] Reset cache at start and at finish ATX transaction (for v10-v12) --- src/pg_pathman.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 24b22eb2..35ad28dd 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -281,7 +281,7 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) else return 1.0; } -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 /* * Reset cache at start and at finish ATX transaction */ @@ -357,7 +357,7 @@ _PG_init(void) init_partition_router_static_data(); init_partition_overseer_static_data(); -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 /* Callbacks for reload relcache for ATX transactions */ RegisterXactCallback(pathman_xact_cb, NULL); #endif } From 1daee0c503ded92d1504b5ec8c99401cee2994ed Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 20 Apr 2022 21:14:25 +0300 Subject: [PATCH 467/528] [PGPRO-6538] Changed lock order The parent table is locked first, and then the partitions are locked. --- src/pl_range_funcs.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 12c247ab..4465d36e 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -683,9 +683,6 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Extract partition Oids from array */ parts[i] = DatumGetObjectId(datums[i]); - /* Prevent modification of partitions */ - LockRelationOid(parts[i], AccessExclusiveLock); - /* Check if all partitions are from the same parent */ cur_parent = get_parent_of_partition(parts[i]); @@ -708,6 +705,10 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Prevent changes in partitioning scheme */ LockRelationOid(parent, ShareUpdateExclusiveLock); + /* Prevent modification of partitions */ + for (i = 0; i < nparts; i++) + LockRelationOid(parts[i], AccessExclusiveLock); + /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); From 66543e768f7b9a7de6844f9bb0780a253ddc2823 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 21 Apr 2022 15:25:13 +0300 Subject: [PATCH 468/528] [PGPRO-6538] Skip non-existing relations for Citus compatibility (issue #247) --- src/utility_stmt_hooking.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 89649e0d..35786092 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -114,7 +114,11 @@ is_pathman_related_copy(Node *parsetree) (copy_stmt->is_from ?
PATHMAN_COPY_WRITE_LOCK : PATHMAN_COPY_READ_LOCK), - false); + true); + + /* Skip relation if it does not exist (for Citus compatibility) */ + if (!OidIsValid(parent_relid)) + return false; /* Check that relation is partitioned */ if (has_pathman_relation_info(parent_relid)) From 52260faa84a09c81bb9c4b3709bf6e723d83ff24 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 18 May 2022 18:17:03 +0300 Subject: [PATCH 469/528] [PGPRO-6644] Corrected memory allocation using double pointer --- src/utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils.c b/src/utils.c index ddf10bae..15552f56 100644 --- a/src/utils.c +++ b/src/utils.c @@ -515,7 +515,7 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) /* Convert partition names into RangeVars */ if (relnames) { - rangevars = palloc(sizeof(RangeVar) * nrelnames); + rangevars = palloc(sizeof(RangeVar *) * nrelnames); for (i = 0; i < nrelnames; i++) { List *nl = stringToQualifiedNameList(relnames[i]); From 31f101220a83d6609fc269c6217ff6be4934317a Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 14 Jun 2022 10:39:41 +0300 Subject: [PATCH 470/528] [PGPRO-6764] Fix build errors after merging 1C_master into STD_master --- src/partition_creation.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 2154bc8a..a89f8f68 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1671,14 +1671,14 @@ make_constraint_common(char *name, Node *raw_expr) return constraint; } -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commits 639a86e36aae, c4cc2850f4d1 */ static String make_string_value_struct(char* str) { String val; val.type = T_String; - val.val = str; + val.sval = str; return val; } @@ -1689,7 +1689,7 @@ make_int_value_struct(int int_val) { Integer val; val.type = T_Integer; - val.val = int_val; + val.ival = int_val; return val; } From 33b4d47a904cdb0f608c3e2c26e77919e351c41b Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 29 Jun 2022 21:12:21 +0300 Subject: [PATCH 471/528] PGPRO-6857: fix build for PostgreSQL 15 - In the commit 791b1b71da35d9d4264f72a87e4078b85a2fcfb4 the functions parse_analyze and pg_analyze_and_rewrite were renamed to parse_analyze_fixedparams and pg_analyze_and_rewrite_fixedparams respectively. - The commit 7103ebb7aae8ab8076b7e85f335ceb8fe799097c added a new argument tmfd to the function ExecBRUpdateTriggers. - The commit ba9a7e392171c83eb3332a757279e7088487f9a2 added a new argument is_crosspart_update to the function ExecARDeleteTriggers.
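
For illustration, here is roughly how a call site uses one of these wrappers. Only the ExecARDeleteTriggersCompat() macro comes from this patch (see the pg_compat.h hunks below); the surrounding function is a hypothetical sketch:

    /* Hypothetical caller; only ExecARDeleteTriggersCompat() is real,
     * everything around it is an illustrative sketch. */
    #include "postgres.h"
    #include "commands/trigger.h"
    #include "storage/itemptr.h"
    #include "compat/pg_compat.h"

    static void
    fire_after_delete(EState *estate, ResultRelInfo *rri, ItemPointer tupleid)
    {
        /*
         * Expands to the five-argument ExecARDeleteTriggers() on
         * PostgreSQL 10-14 and passes is_crosspart_update = false on
         * 15+ (commit ba9a7e392171), so the call site itself needs no
         * version checks.
         */
        ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL);
    }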
--- src/include/compat/pg_compat.h | 35 +++++++++++++++++++++++++++++++--- src/partition_router.c | 2 +- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index a551b7ed..80a76d60 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -635,7 +635,12 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze_fixedparams((RawStmt *) (parse_tree), (query_string), (param_types), \ + (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 #define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ query_env) \ parse_analyze((RawStmt *) (parse_tree), (query_string), (param_types), \ @@ -653,7 +658,12 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ + pg_analyze_and_rewrite_fixedparams((RawStmt *) (parsetree), (query_string), \ + (param_types), (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 #define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ nparams, query_env) \ pg_analyze_and_rewrite((RawStmt *) (parsetree), (query_string), \ @@ -766,6 +776,20 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #include "access/tupconvert.h" #endif +/* + * ExecBRUpdateTriggers() + */ +#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL) +#else +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot)) +#endif /* * ExecARInsertTriggers() @@ -801,7 +825,12 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecARDeleteTriggers() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ +#define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ + (fdw_trigtuple), (transition_capture), false) +#elif PG_VERSION_NUM >= 100000 #define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ fdw_trigtuple, transition_capture) \ ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ diff --git a/src/partition_router.c b/src/partition_router.c index 17013a02..90727c00 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -523,7 +523,7 @@ router_lock_or_delete_tuple(PartitionRouterState *state, rri->ri_TrigDesc->trig_update_before_row) { #if PG_VERSION_NUM >= 120000 - if (!ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot)) + if (!ExecBRUpdateTriggersCompat(estate, epqstate, rri, tupleid, NULL, slot)) return NULL; #else slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); From 677e7913bc7473d075d5e0777dfa18a98ada758f Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 11 Jul 2022 23:12:16 +0300 Subject: [PATCH 472/528] 
[PGPRO-5360] Fix for freeze (Valgrind and compilation with -Og option) --- src/pathman_workers.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 38d61622..7b64017b 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -458,8 +458,8 @@ bgw_main_concurrent_part(Datum main_arg) ConcurrentPartSlot *part_slot; char *sql = NULL; int64 rows; - bool failed; - int failures_count = 0; + volatile bool failed; + volatile int failures_count = 0; LOCKMODE lockmode = RowExclusiveLock; /* Update concurrent part slot */ @@ -497,7 +497,7 @@ bgw_main_concurrent_part(Datum main_arg) Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - bool rel_locked = false; + volatile bool rel_locked = false; /* Reset loop variables */ failed = false; From f1350909c8071c7f4393e37aa452576e11a09db1 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 14 Jul 2022 14:00:56 +0300 Subject: [PATCH 473/528] Revert "hide false positives found by clang analyzer" This reverts commit 6b00d812b9396353fff72d42181278c4bd19b68f. --- src/pathman_workers.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 7b64017b..eca9ee52 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -545,14 +545,12 @@ bgw_main_concurrent_part(Datum main_arg) /* Great, now relation is locked */ rel_locked = true; - (void) rel_locked; /* mute clang analyzer */ /* Make sure that relation exists */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid))) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; - (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation %u does not exist", part_slot->relid); } @@ -562,7 +560,6 @@ bgw_main_concurrent_part(Datum main_arg) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; - (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation \"%s\" is not partitioned", get_rel_name(part_slot->relid)); From ff2942add4eb0c53936fac9205a40375db911a68 Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Wed, 20 Jul 2022 13:21:59 +0300 Subject: [PATCH 474/528] adapt pg_pathman for upcoming PostgreSQL 15 Only call RequestAddinShmemSpace from within our implementation of shmem_request_hook (as required after commit 4f2400cb3 in PostgreSQL 15). 
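
Stripped down to a self-contained module, the required hook-chaining pattern looks as follows; this is a minimal sketch, where my_shmem_size() is an illustrative stand-in for pg_pathman's estimate_pathman_shmem_size():

    /* Minimal sketch of the shmem_request_hook pattern required since
     * commit 4f2400cb3; my_shmem_size() is an illustrative stand-in. */
    #include "postgres.h"
    #include "fmgr.h"
    #include "storage/ipc.h"
    #include "storage/shmem.h"

    PG_MODULE_MAGIC;

    #if PG_VERSION_NUM >= 150000
    static shmem_request_hook_type prev_shmem_request_hook = NULL;
    static void my_shmem_request(void);
    #endif

    static Size
    my_shmem_size(void)
    {
        return 1024;    /* illustrative amount */
    }

    void
    _PG_init(void)
    {
    #if PG_VERSION_NUM >= 150000
        /* On 15+, shared memory may only be requested from this hook */
        prev_shmem_request_hook = shmem_request_hook;
        shmem_request_hook = my_shmem_request;
    #else
        /* Before 15, requesting it directly from _PG_init() was allowed */
        RequestAddinShmemSpace(my_shmem_size());
    #endif
    }

    #if PG_VERSION_NUM >= 150000
    static void
    my_shmem_request(void)
    {
        /* Chain to any previously installed hook first */
        if (prev_shmem_request_hook)
            prev_shmem_request_hook();

        RequestAddinShmemSpace(my_shmem_size());
    }
    #endif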
--- src/pg_pathman.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 35ad28dd..b6b5d815 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -314,6 +314,11 @@ pathman_xact_cb(XactEvent event, void *arg) * ------------------- */ +#if PG_VERSION_NUM >= 150000 +static shmem_request_hook_type prev_shmem_request_hook = NULL; +static void pg_pathman_shmem_request(void); +#endif + /* Set initial values for all Postmaster's forks */ void _PG_init(void) @@ -326,7 +331,12 @@ _PG_init(void) } /* Request additional shared resources */ +#if PG_VERSION_NUM >= 150000 + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = pg_pathman_shmem_request; +#else RequestAddinShmemSpace(estimate_pathman_shmem_size()); +#endif /* Assign pg_pathman's initial state */ pathman_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; @@ -363,6 +373,17 @@ _PG_init(void) #endif } +#if PG_VERSION_NUM >= 150000 +static void +pg_pathman_shmem_request(void) +{ + if (prev_shmem_request_hook) + prev_shmem_request_hook(); + + RequestAddinShmemSpace(estimate_pathman_shmem_size()); +} +#endif + /* Get cached PATHMAN_CONFIG relation Oid */ Oid get_pathman_config_relid(bool invalid_is_ok) From 0b54f70915da8ca919ddb1216863ab8ebf819b46 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 7 Jun 2022 17:56:39 +0300 Subject: [PATCH 475/528] [PGPRO-6734] PostgreSQL v15 compatibility --- .travis.yml | 7 +- Dockerfile.tmpl | 2 +- patches/REL_14_STABLE-pg_pathman-core.diff | 8 +- patches/REL_15_STABLE-pg_pathman-core.diff | 506 +++++++++++++++++++++ run_tests.sh | 50 +- src/hooks.c | 2 +- src/partition_creation.c | 8 +- src/partition_router.c | 4 +- src/pg_pathman.c | 6 +- src/planner_tree_modification.c | 14 + tests/python/partitioning_test.py | 82 ++-- 11 files changed, 620 insertions(+), 69 deletions(-) create mode 100644 patches/REL_15_STABLE-pg_pathman-core.diff diff --git a/.travis.yml b/.travis.yml index 7f22cf8e..67a5f2ee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,8 @@ notifications: on_failure: always env: + - PG_VERSION=14 LEVEL=hardcore + - PG_VERSION=14 - PG_VERSION=13 LEVEL=hardcore - PG_VERSION=13 - PG_VERSION=12 LEVEL=hardcore @@ -28,12 +30,7 @@ env: - PG_VERSION=11 - PG_VERSION=10 LEVEL=hardcore - PG_VERSION=10 - - PG_VERSION=9.6 LEVEL=hardcore - - PG_VERSION=9.6 - - PG_VERSION=9.5 LEVEL=hardcore - - PG_VERSION=9.5 jobs: allow_failures: - env: PG_VERSION=10 LEVEL=nightmare - - env: PG_VERSION=9.6 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index e1e3b0e6..0a25ad14 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -2,7 +2,7 @@ FROM postgres:${PG_VERSION}-alpine # Install dependencies RUN apk add --no-cache \ - openssl curl \ + openssl curl git patch \ cmocka-dev \ perl perl-ipc-run \ python3 python3-dev py3-virtualenv \ diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index e3e7c549..751095aa 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -33,8 +33,8 @@ index 5483dee650..e2864e6ae9 100644 out: + + /* -+ * pg_pathman: pass 'tts_tableOid' to result tuple for determine from -+ * which partition the touple was read ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read + */ + if (resultslot) + { @@ -111,7 +111,7 @@ index d328856ae5..27235ec869 100644 for (;;) { + /* -+ * "es_original_tuple" should contains original modified tuple (new ++ * 
"es_original_tuple" should contain original modified tuple (new + * values of the changed columns plus row identity information such as + * CTID) in case tuple planSlot is replaced in pg_pathman to new value + * in call "ExecProcNode(subplanstate)". @@ -312,7 +312,7 @@ index 381d9e548d..9d101c3a86 100644 -ProtocolVersion FrontendProtocol; -+ProtocolVersion FrontendProtocol = (ProtocolVersion)0; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; volatile sig_atomic_t InterruptPending = false; volatile sig_atomic_t QueryCancelPending = false; diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b30b0230 --- /dev/null +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -0,0 +1,506 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 47d80b0d25..6689776769 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index e44ad68cda..b9ba79e756 100644 +--- a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1831,6 +1831,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index ef2fd46092..8551733c55 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2811,6 +2818,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_junkFilter = parentestate->es_junkFilter; + rcestate->es_output_cid = parentestate->es_output_cid; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. 
+diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index a49c3da5b6..2c0b32e2df 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -551,7 +551,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, + * This is also a convenient place to verify that the output of an UPDATE + * matches the target table (ExecBuildUpdateProjection does that). + */ +-static void ++void + ExecInitUpdateProjection(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) + { +@@ -3460,6 +3460,7 @@ ExecModifyTable(PlanState *pstate) + PartitionTupleRouting *proute = node->mt_partition_tuple_routing; + List *relinfos = NIL; + ListCell *lc; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3501,6 +3502,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3508,6 +3511,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3541,7 +3552,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? ++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3578,6 +3591,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3587,6 +3602,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3617,7 +3633,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3665,7 +3682,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ +@@ -3696,9 +3714,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3706,38 +3727,46 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + +- /* +- * Make the new tuple by combining plan's output tuple with +- * the old tuple being updated. +- */ +- oldSlot = resultRelInfo->ri_oldTupleSlot; +- if (oldtuple != NULL) +- { +- /* Use the wholerow junk attr as the old tuple. */ +- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); +- } +- else ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) + { +- /* Fetch the most recent version of old tuple. */ +- Relation relation = resultRelInfo->ri_RelationDesc; ++ /* ++ * Make the new tuple by combining plan's output tuple ++ * with the old tuple being updated. ++ */ ++ oldSlot = resultRelInfo->ri_oldTupleSlot; ++ if (oldtuple != NULL) ++ { ++ /* Use the wholerow junk attr as the old tuple. */ ++ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); ++ } ++ else ++ { ++ /* Fetch the most recent version of old tuple. */ ++ Relation relation = resultRelInfo->ri_RelationDesc; + +- if (!table_tuple_fetch_row_version(relation, tupleid, +- SnapshotAny, +- oldSlot)) +- elog(ERROR, "failed to fetch tuple being updated"); ++ if (!table_tuple_fetch_row_version(relation, tupleid, ++ SnapshotAny, ++ oldSlot)) ++ elog(ERROR, "failed to fetch tuple being updated"); ++ } ++ slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, ++ oldSlot, NULL); ++ context.GetUpdateNewTuple = internalGetUpdateNewTuple; ++ context.relaction = NULL; + } +- slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, +- oldSlot, NULL); +- context.GetUpdateNewTuple = internalGetUpdateNewTuple; +- context.relaction = NULL; + + /* Now apply the update. */ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL); + break; + +@@ -3755,7 +3784,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. 
+ */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3784,6 +3816,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3858,6 +3891,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -3958,6 +3992,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4040,6 +4081,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + } + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 1a5d29ac9b..c70e3ff8b8 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 4794941df3..483050268e 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index d68a6b9d28..a96eb93316 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -661,5 +661,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++extern void ExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index 5728801379..ec5496afff 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -611,6 +611,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 8de79c618c..c9226ba5ad 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,18 @@ my @client_program_files = ( + 
'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} + sub lcopy + { + my $src = shift; +@@ -609,7 +621,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index e4feda10fd..74a0a0a062 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -39,8 +39,8 @@ my $contrib_defines = {}; + my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); +-my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -964,6 +964,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1067,6 +1068,19 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1091,23 +1105,53 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) +- { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ if ( -f "contrib/$n/$d.in" ) { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } else { ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/run_tests.sh b/run_tests.sh index 8f06d39c..2e2edc6f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -17,9 +17,26 @@ status=0 export PGPORT=55435 export VIRTUAL_ENV_DISABLE_PROMPT=1 -# rebuild PostgreSQL with cassert + valgrind support +PATHMAN_DIR=$PWD + +# indicator of using cassert + valgrind support +USE_ASSERT_VALGRIND=false if [ "$LEVEL" = "hardcore" ] || \ [ "$LEVEL" = "nightmare" ]; then + USE_ASSERT_VALGRIND=true +fi + +# indicator of using special patch for vanilla +if [ "$(printf '%s\n' "14" "$PG_VERSION" | sort -V | head -n1)" = "$PG_VERSION" ]; then + USE_PATH=false +else + #patch version 14 and newer + USE_PATH=true +fi + +# rebuild PostgreSQL with cassert + valgrind support +if [ "$USE_ASSERT_VALGRIND" = true ] || \ + [ "$USE_PATH" = true ]; then set -e @@ -40,15 +57,28 @@ if [ "$LEVEL" = "hardcore" ] || \ cd $CUSTOM_PG_SRC - # enable Valgrind support - sed -i.bak "s/\/* #define USE_VALGRIND *\//#define USE_VALGRIND/g" src/include/pg_config_manual.h - - # enable additional options - ./configure \ - CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ - --enable-cassert \ - --prefix=$CUSTOM_PG_BIN \ - --quiet + if [ "$USE_PATH" = true ]; then + # apply the patch + patch -p1 < $PATHMAN_DIR/patches/REL_${PG_VERSION%.*}_STABLE-pg_pathman-core.diff + fi + + if [ "$USE_ASSERT_VALGRIND" = true ]; then + # enable Valgrind support + sed -i.bak "s/\/* #define USE_VALGRIND *\//#define USE_VALGRIND/g" src/include/pg_config_manual.h + + # enable additional options + ./configure \ + CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + else + # without additional options + ./configure \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + fi # build & install PG time make -s -j$(nproc) && make -s install diff --git a/src/hooks.c b/src/hooks.c index f376e4a0..46204d5c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -293,7 +293,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, * Currently we use get_parameterized_joinrel_size() since * it works just fine, but this might change some day. 
*/ -#if PG_VERSION_NUM >= 150000 /* reason: commit 18fea737b5e4 */ +#if PG_VERSION_NUM >= 150000 /* for commit 18fea737b5e4 */ nest_path->jpath.path.rows = #else nest_path->path.rows = diff --git a/src/partition_creation.c b/src/partition_creation.c index a89f8f68..b98163d7 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -92,7 +92,7 @@ static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_rel static Oid text_to_regprocedure(text *proname_args); static Constraint *make_constraint_common(char *name, Node *raw_expr); -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ static String make_string_value_struct(char *str); static Integer make_int_value_struct(int int_val); #else @@ -1361,7 +1361,7 @@ build_raw_range_check_tree(Node *raw_expression, const Bound *end_value, Oid value_type) { -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ #define BuildConstExpr(node, value, value_type) \ do { \ (node)->val.sval = make_string_value_struct( \ @@ -1568,7 +1568,7 @@ build_raw_hash_check_tree(Node *raw_expression, hash_proc = tce->hash_proc; /* Total amount of partitions */ -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ part_count_c->val.ival = make_int_value_struct(part_count); #else part_count_c->val = make_int_value_struct(part_count); @@ -1576,7 +1576,7 @@ build_raw_hash_check_tree(Node *raw_expression, part_count_c->location = -1; /* Index of this partition (hash % total amount) */ -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ part_idx_c->val.ival = make_int_value_struct(part_idx); #else part_idx_c->val = make_int_value_struct(part_idx); diff --git a/src/partition_router.c b/src/partition_router.c index 90727c00..54f6e25e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -198,12 +198,14 @@ partition_router_exec(CustomScanState *node) TupleTableSlot *old_slot; ResultRelInfo *rri; #endif - TupleTableSlot *full_slot = slot; + TupleTableSlot *full_slot; bool partition_changed = false; ItemPointerSetInvalid(&ctid); #if PG_VERSION_NUM < 140000 + full_slot = slot; + /* Build new junkfilter if needed */ if (state->junkfilter == NULL) state->junkfilter = state->current_rri->ri_junkFilter; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index b6b5d815..3b99a7e7 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -314,7 +314,7 @@ pathman_xact_cb(XactEvent event, void *arg) * ------------------- */ -#if PG_VERSION_NUM >= 150000 +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ static shmem_request_hook_type prev_shmem_request_hook = NULL; static void pg_pathman_shmem_request(void); #endif @@ -331,7 +331,7 @@ _PG_init(void) } /* Request additional shared resources */ -#if PG_VERSION_NUM >= 150000 +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ prev_shmem_request_hook = shmem_request_hook; shmem_request_hook = pg_pathman_shmem_request; #else @@ -373,7 +373,7 @@ _PG_init(void) #endif } -#if PG_VERSION_NUM >= 150000 +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ static void pg_pathman_shmem_request(void) { diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 2477cc7f..b321d9e6 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -495,7 +495,17 @@ 
disable_standard_inheritance(Query *parse, transform_query_cxt *context) if (rte->rtekind != RTE_RELATION || rte->relkind != RELKIND_RELATION || parse->resultRelation == current_rti) /* is it a result relation? */ + { +#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ + if (parse->commandType == CMD_MERGE && + (rte->rtekind == RTE_RELATION || + rte->relkind == RELKIND_RELATION) && + rte->inh && has_pathman_relation_info(rte->relid)) + elog(ERROR, "pg_pathman doesn't support MERGE command yet"); +#endif + continue; + } /* Table may be partitioned */ if (rte->inh) @@ -805,7 +815,9 @@ partition_filter_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); +#if PG_VERSION_NUM < 140000 lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif } #if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ @@ -893,7 +905,9 @@ partition_router_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); +#if PG_VERSION_NUM < 140000 lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif } #if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index ad555455..152b8b19 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -549,7 +549,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed', 'Async Capable']), ordered(expected)) # Check count of returned tuples count = con.execute( @@ -602,7 +602,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed', 'Async Capable']), ordered(expected)) # Check tuples returned by query above res_tuples = con.execute( @@ -625,7 +625,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Async Capable']), ordered(expected)) # Remove all objects for testing node.psql('drop table range_partitioned cascade') @@ -665,13 +665,13 @@ def con2_thread(): res = con2.execute(""" explain (analyze, costs off, timing off) select * from drop_test - where val = any (select generate_series(1, 40, 34)) - """) # query selects from drop_test_1 and drop_test_4 + where val = any (select generate_series(22, 40, 13)) + """) # query selects from drop_test_3 and drop_test_4 con2.commit() has_runtime_append = False - has_drop_test_1 = False + has_drop_test_3 = False has_drop_test_4 = False for row in res: @@ -679,8 +679,8 @@ def con2_thread(): has_runtime_append = True continue - if row[0].find('drop_test_1') >= 0: - has_drop_test_1 = True + if row[0].find('drop_test_3') >= 0: + has_drop_test_3 = True continue if row[0].find('drop_test_4') >= 0: @@ -688,7 +688,7 @@ def con2_thread(): continue # return all values in tuple - queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + queue.put((has_runtime_append, has_drop_test_3, has_drop_test_4)) # Step 1: cache partitioned table in con1 con1.begin() @@ -702,7 +702,7 @@ def con2_thread(): # Step 3: drop first partition of 'drop_test' con1.begin() - con1.execute('drop table drop_test_1') + con1.execute('drop table drop_test_3') # Step 4: try executing select (RuntimeAppend) t = threading.Thread(target=con2_thread) @@ -734,9 +734,9 @@ def con2_thread(): self.assertEqual(len(rows), 99) # check RuntimeAppend 
+ selected partitions - (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + (has_runtime_append, has_drop_test_3, has_drop_test_4) = queue.get() self.assertTrue(has_runtime_append) - self.assertFalse(has_drop_test_1) + self.assertFalse(has_drop_test_3) self.assertTrue(has_drop_test_4) def test_conc_part_creation_insert(self): @@ -1044,34 +1044,36 @@ def test_update_node_plan1(self): self.assertEqual(plan["Relation Name"], "test_range") self.assertEqual(len(plan["Target Tables"]), 11) - expected_format = ''' - { - "Plans": [ - { - "Plans": [ - { - "Filter": "(comment = '15'::text)", - "Node Type": "Seq Scan", - "Relation Name": "test_range%s", - "Parent Relationship": "child" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "child", - "Custom Plan Provider": "PartitionRouter" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "Member", - "Custom Plan Provider": "PartitionFilter" - } - ''' - - for i, f in enumerate([''] + list(map(str, range(1, 10)))): - num = '_' + f if f else '' - expected = json.loads(expected_format % num) - p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) - self.assertEqual(p, ordered(expected)) + # Plan was seriously changed in vanilla since v14 + if version < LooseVersion('14'): + expected_format = ''' + { + "Plans": [ + { + "Plans": [ + { + "Filter": "(comment = '15'::text)", + "Node Type": "Seq Scan", + "Relation Name": "test_range%s", + "Parent Relationship": "child" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "child", + "Custom Plan Provider": "PartitionRouter" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "Member", + "Custom Plan Provider": "PartitionFilter" + } + ''' + + for i, f in enumerate([''] + list(map(str, range(1, 10)))): + num = '_' + f if f else '' + expected = json.loads(expected_format % num) + p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) + self.assertEqual(p, ordered(expected)) node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') From 02010f9888545169d029ab58d9f02940fe408683 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Mon, 22 Aug 2022 19:18:00 +0300 Subject: [PATCH 476/528] PGPRO-6148, PGPRO-7080: Use common macro for all PG versions instead of add_vars_to_targetlist() function. 
Reason: b3ff6c74
---
 src/compat/rowmarks_fix.c         |  2 +-
 src/include/compat/rowmarks_fix.h | 12 ++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c
index 4dd1c20a..35eea44b 100644
--- a/src/compat/rowmarks_fix.c
+++ b/src/compat/rowmarks_fix.c
@@ -47,7 +47,7 @@ append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc)
 	root->processed_tlist = lappend(root->processed_tlist, tle);
 
-	add_vars_to_targetlist(root, list_make1(var), bms_make_singleton(0), true);
+	add_vars_to_targetlist_compat(root, list_make1(var), bms_make_singleton(0));
 }
 
diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h
index 09e5fbef..c94504c3 100644
--- a/src/include/compat/rowmarks_fix.h
+++ b/src/include/compat/rowmarks_fix.h
@@ -45,5 +45,17 @@ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc);
 
 #endif
 
+/*
+ * add_vars_to_targetlist()
+ * In PG >= 16 the last argument was removed (b3ff6c742f6c)
+ */
+#if PG_VERSION_NUM >= 160000
+#define add_vars_to_targetlist_compat(root, vars, where_needed) \
+	add_vars_to_targetlist((root), (vars), (where_needed));
+#else
+#define add_vars_to_targetlist_compat(root, vars, where_needed) \
+	add_vars_to_targetlist((root), (vars), (where_needed), true);
+#endif
+
 #endif /* ROWMARKS_FIX_H */

From 19d6a1a00b62ccecf81223e6f7795460e2590354 Mon Sep 17 00:00:00 2001
From: Koval Dmitry
Date: Thu, 13 Oct 2022 16:38:32 +0300
Subject: [PATCH 477/528] travis-ci for v15

---
 .travis.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index 67a5f2ee..dd63d98f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,6 +20,8 @@ notifications:
     on_failure: always
 
 env:
+    - PG_VERSION=15 LEVEL=hardcore
+    - PG_VERSION=15
     - PG_VERSION=14 LEVEL=hardcore
     - PG_VERSION=14
     - PG_VERSION=13 LEVEL=hardcore

From c76321ac5b87063b6b4e35901a4958341e58a66a Mon Sep 17 00:00:00 2001
From: Anton Voloshin
Date: Thu, 20 Oct 2022 12:43:31 +0300
Subject: [PATCH 478/528] PGPRO-7123: unexport ExecInitUpdateProjection for
 timescaledb

In vanilla PostgreSQL, ExecInitUpdateProjection is a static function.
However, pgpro exports that function to meet pg_pathman's needs.
Starting with version 15, we rename the exported function, adding a
Pgpro prefix to avoid compatibility issues with timescaledb.

---
 src/partition_router.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/partition_router.c b/src/partition_router.c
index 54f6e25e..b551158e 100644
--- a/src/partition_router.c
+++ b/src/partition_router.c
@@ -254,7 +254,11 @@ partition_router_exec(CustomScanState *node)
 
 			/* Initialize projection info if first time for this table. */
 			if (unlikely(!rri->ri_projectNewInfoValid))
+#if PG_VERSION_NUM >= 150000 /* after PGPRO-7123 */
+				PgproExecInitUpdateProjection(state->mt_state, rri);
+#else
 				ExecInitUpdateProjection(state->mt_state, rri);
+#endif /* PG_VERSION_NUM >= 150000 ... else */
 
 			old_slot = rri->ri_oldTupleSlot;
 			/* Fetch the most recent version of old tuple.
*/ @@ -264,7 +268,7 @@ partition_router_exec(CustomScanState *node) /* Build full tuple (using "old_slot" + changed from "slot"): */ full_slot = ExecGetUpdateNewTuple(rri, slot, old_slot); -#endif +#endif /* PG_VERSION_NUM >= 140000 */ /* Lock or delete tuple from old partition */ full_slot = router_lock_or_delete_tuple(state, full_slot, From eabe2a886f564c5cc3a8f1b9bb29e35d8e5108c8 Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Mon, 24 Oct 2022 15:11:44 +0300 Subject: [PATCH 479/528] Use proper ifdef to get ExecInitUpdateProjection's name updating the previous commit. --- src/partition_router.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index b551158e..eefc44bf 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -254,11 +254,11 @@ partition_router_exec(CustomScanState *node) /* Initialize projection info if first time for this table. */ if (unlikely(!rri->ri_projectNewInfoValid)) -#if PG_VERSION_NUM >= 150000 /* after PGPRO-7123 */ +#ifdef PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION PgproExecInitUpdateProjection(state->mt_state, rri); #else ExecInitUpdateProjection(state->mt_state, rri); -#endif /* PG_VERSION_NUM >= 150000 ... else */ +#endif /* !PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION */ old_slot = rri->ri_oldTupleSlot; /* Fetch the most recent version of old tuple. */ From 7a244036d5db177e5fc89530fc643a91f64b7502 Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Tue, 25 Oct 2022 14:22:16 +0300 Subject: [PATCH 480/528] update .patch for REL_15_STABLE to match current code --- patches/REL_15_STABLE-pg_pathman-core.diff | 91 +++++++++++++--------- 1 file changed, 53 insertions(+), 38 deletions(-) diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index b30b0230..e0eb9a62 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -1,5 +1,5 @@ diff --git a/contrib/Makefile b/contrib/Makefile -index bbf220407b..9a82a2db04 100644 +index bbf220407b0..9a82a2db046 100644 --- a/contrib/Makefile +++ b/contrib/Makefile @@ -34,6 +34,7 @@ SUBDIRS = \ @@ -11,7 +11,7 @@ index bbf220407b..9a82a2db04 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index 47d80b0d25..6689776769 100644 +index 594d8da2cdc..a2049e70e95 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,10 +24,10 @@ index 47d80b0d25..6689776769 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index e44ad68cda..b9ba79e756 100644 +index ef0f9577ab1..95858960d50 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c -@@ -1831,6 +1831,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) +@@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) } out: @@ -45,7 +45,7 @@ index e44ad68cda..b9ba79e756 100644 return state->resvalue; } diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c -index ef2fd46092..8551733c55 100644 +index ef2fd46092e..8551733c55d 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) @@ -77,19 +77,24 @@ index ef2fd46092..8551733c55 
100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index a49c3da5b6..2c0b32e2df 100644 +index 04454ad6e60..6a52e86b782 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -551,7 +551,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, - * This is also a convenient place to verify that the output of an UPDATE - * matches the target table (ExecBuildUpdateProjection does that). - */ --static void +@@ -603,6 +603,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + +void - ExecInitUpdateProjection(ModifyTableState *mtstate, - ResultRelInfo *resultRelInfo) - { -@@ -3460,6 +3460,7 @@ ExecModifyTable(PlanState *pstate) ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3461,6 +3468,7 @@ ExecModifyTable(PlanState *pstate) PartitionTupleRouting *proute = node->mt_partition_tuple_routing; List *relinfos = NIL; ListCell *lc; @@ -97,7 +102,7 @@ index a49c3da5b6..2c0b32e2df 100644 CHECK_FOR_INTERRUPTS(); -@@ -3501,6 +3502,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3502,6 +3510,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -106,7 +111,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3508,6 +3511,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3509,6 +3519,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -121,7 +126,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly -@@ -3541,7 +3552,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3542,7 +3560,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -132,7 +137,7 @@ index a49c3da5b6..2c0b32e2df 100644 &isNull); if (isNull) { -@@ -3578,6 +3591,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3579,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -141,7 +146,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3587,6 +3602,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3588,6 +3610,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -149,7 +154,7 @@ index a49c3da5b6..2c0b32e2df 100644 return slot; } -@@ -3617,7 +3633,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3618,7 +3641,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -159,7 +164,7 @@ index a49c3da5b6..2c0b32e2df 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3665,7 +3682,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3666,7 +3690,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -169,7 +174,7 @@ index a49c3da5b6..2c0b32e2df 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -3696,9 +3714,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3697,9 +3722,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -185,7 +190,7 @@ index a49c3da5b6..2c0b32e2df 100644 break; case CMD_UPDATE: -@@ -3706,38 +3727,46 @@ ExecModifyTable(PlanState *pstate) +@@ -3707,38 +3735,46 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -255,7 +260,7 @@ index a49c3da5b6..2c0b32e2df 100644 true, false, node->canSetTag, NULL, NULL); break; -@@ -3755,7 +3784,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3756,7 +3792,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -266,7 +271,7 @@ index a49c3da5b6..2c0b32e2df 100644 } /* -@@ -3784,6 +3816,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3785,6 +3824,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -274,7 +279,7 @@ index a49c3da5b6..2c0b32e2df 100644 return NULL; } -@@ -3858,6 +3891,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3859,6 +3899,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -282,7 +287,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -3958,6 +3992,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3959,6 +4000,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -296,7 +301,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * Now we may initialize the subplan. */ -@@ -4040,6 +4081,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4041,6 +4089,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) } } @@ -306,7 +311,7 @@ index a49c3da5b6..2c0b32e2df 100644 * If this is an inherited update/delete/merge, there will be a junk * attribute named "tableoid" present in the subplan's targetlist. 
It
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
-index 1a5d29ac9b..c70e3ff8b8 100644
+index 1a5d29ac9ba..aadca8ea474 100644
 --- a/src/backend/utils/init/globals.c
 +++ b/src/backend/utils/init/globals.c
 @@ -25,7 +25,7 @@
 
  volatile sig_atomic_t InterruptPending = false;
  volatile sig_atomic_t QueryCancelPending = false;
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
-index 4794941df3..483050268e 100644
+index 65616ca2f79..965eb544217 100644
 --- a/src/include/access/xact.h
 +++ b/src/include/access/xact.h
 @@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel;
 
  /* flag for logging statements in this transaction */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
-index d68a6b9d28..a96eb93316 100644
+index 82925b4b633..de23622ca24 100644
 --- a/src/include/executor/executor.h
 +++ b/src/include/executor/executor.h
-@@ -661,5 +661,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node,
+@@ -659,5 +659,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node,
  											   Oid resultoid,
  											   bool missing_ok, bool update_cache);
 
-+extern void ExecInitUpdateProjection(ModifyTableState *mtstate,
-+									 ResultRelInfo *resultRelInfo);
++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION
++/*
++ * This function is static in vanilla, but pg_pathman wants it exported.
++ * We cannot make it extern with the same name, to avoid compilation errors
++ * in timescaledb, which ships its own static copy of the same function.
++ * So, export ExecInitUpdateProjection with a Pgpro prefix.
++ *
++ * The define above helps pg_pathman expect the proper exported symbol
++ * from various versions of pgpro.
++ */
++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate,
++										  ResultRelInfo *resultRelInfo);
 
 #endif							/* EXECUTOR_H */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
-index 5728801379..ec5496afff 100644
+index 57288013795..ec5496afffa 100644
 --- a/src/include/nodes/execnodes.h
 +++ b/src/include/nodes/execnodes.h
 @@ -611,6 +611,12 @@ typedef struct EState
 
  /*
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
-index 8de79c618c..c9226ba5ad 100644
+index 8de79c618cb..c9226ba5ad4 100644
 --- a/src/tools/msvc/Install.pm
 +++ b/src/tools/msvc/Install.pm
 @@ -30,6 +30,18 @@ my @client_program_files = (
 
 sub CopyIncludeFiles
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
-index e4feda10fd..74a0a0a062 100644
+index e4feda10fd8..74a0a0a062b 100644
 --- a/src/tools/msvc/Mkvcbuild.pm
 +++ b/src/tools/msvc/Mkvcbuild.pm
 @@ -39,8 +39,8 @@ my $contrib_defines = {};

From 2680eee6d84f2a8955e0e6ad50f3f4f4db43a4ba Mon Sep 17 00:00:00 2001
From: Marina Polyakova
Date: Fri, 18 Nov 2022 07:47:55 +0300
Subject: [PATCH 481/528] Fix build due to new checks in PostgreSQL 16

Due to the commit c8b2ef05f481ef06326d7b9f3eb14b303f215c7e in PostgreSQL
16:

- The macro CStringGetTextDatum returns a Datum, so use the more
  appropriate macro PG_RETURN_DATUM instead of PG_RETURN_TEXT_P.

- The input to the macro TextDatumGetCString must be of type Datum, so
  use the more appropriate macro PG_GETARG_DATUM instead of
  PG_GETARG_TEXT_P.
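
For illustration, a minimal self-contained sketch of the pattern this commit
applies. The function name demo_echo_text is hypothetical and not part of
pg_pathman; the macros are the stock PostgreSQL fmgr/builtins ones:

    #include "postgres.h"
    #include "fmgr.h"
    #include "utils/builtins.h"

    PG_FUNCTION_INFO_V1(demo_echo_text);

    /* Hypothetical SQL-callable function showing the Datum-correct macros */
    Datum
    demo_echo_text(PG_FUNCTION_ARGS)
    {
        /* TextDatumGetCString() takes a Datum, so fetch one with PG_GETARG_DATUM() */
        char   *str = TextDatumGetCString(PG_GETARG_DATUM(0));

        /* CStringGetTextDatum() yields a Datum, so return it with PG_RETURN_DATUM() */
        PG_RETURN_DATUM(CStringGetTextDatum(str));
    }

Mixing the macro families (PG_GETARG_TEXT_P yields a text pointer,
PG_RETURN_TEXT_P expects one) used to compile silently, but fails under
the stricter Datum type checks introduced in PostgreSQL 16.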
--- src/pl_funcs.c | 10 +++++----- src/pl_hash_funcs.c | 2 +- src/pl_range_funcs.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 76ecbe3d..b638fc47 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -179,7 +179,7 @@ get_partition_cooked_key_pl(PG_FUNCTION_ARGS) pfree(expr_cstr); pfree(expr); - PG_RETURN_TEXT_P(CStringGetTextDatum(cooked_cstr)); + PG_RETURN_DATUM(CStringGetTextDatum(cooked_cstr)); } /* @@ -199,7 +199,7 @@ get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS) res = CStringGetTextDatum(nodeToString(prel->expr)); close_pathman_relation_info(prel); - PG_RETURN_TEXT_P(res); + PG_RETURN_DATUM(res); } /* @@ -688,7 +688,7 @@ validate_expression(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); @@ -818,7 +818,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); @@ -1203,7 +1203,7 @@ is_operator_supported(PG_FUNCTION_ARGS) { Oid opid, typid = PG_GETARG_OID(0); - char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + char *opname = TextDatumGetCString(PG_GETARG_DATUM(1)); opid = compatible_oper_opid(list_make1(makeString(opname)), typid, typid, true); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index f4a44b71..ddaaa8c0 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -119,7 +119,7 @@ Datum build_hash_condition(PG_FUNCTION_ARGS) { Oid expr_type = PG_GETARG_OID(0); - char *expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + char *expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(1)); uint32 part_count = PG_GETARG_UINT32(2), part_idx = PG_GETARG_UINT32(3); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 4465d36e..b2a8dc3d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -156,7 +156,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Fetch 'tablespace' */ if (!PG_ARGISNULL(4)) { - tablespace = TextDatumGetCString(PG_GETARG_TEXT_P(4)); + tablespace = TextDatumGetCString(PG_GETARG_DATUM(4)); } else tablespace = NULL; /* default */ @@ -429,7 +429,7 @@ validate_interval_value(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); } - else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); + else expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(ARG_EXPRESSION)); if (PG_ARGISNULL(ARG_PARTTYPE)) { @@ -1086,7 +1086,7 @@ build_range_condition(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL")));; From 12594a31a25f3ca34d7b1331889c740870bff765 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Mon, 21 Nov 2022 15:44:24 +0300 Subject: [PATCH 482/528] Fix compiler warnings due to new checks in PostgreSQL 16 See the commit 0fe954c28584169938e5c0738cfaa9930ce77577 (Add -Wshadow=compatible-local to the standard compilation flags) in PostgreSQL 16. 
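
A minimal sketch of the warning class being silenced (the PRINT_OWNERS names
below are illustrative, not from pg_pathman; the real fix renames the locals
in the LeakTracker macros the same way): -Wshadow=compatible-local fires when
a variable declared inside a macro body shadows a compatibly-typed local
already in scope at the expansion site.

    /* Before: "lc" in the macro shadows any "ListCell *lc" at the call
     * site, tripping -Wshadow=compatible-local. */
    #define PRINT_OWNERS_BAD(prel) \
        do { \
            ListCell *lc; \
            foreach (lc, (prel)->owners) \
                elog(WARNING, "owner entry %p", (void *) lfirst(lc)); \
        } while (0)

    /* After: a macro-specific name cannot collide with call-site locals. */
    #define PRINT_OWNERS_GOOD(prel) \
        do { \
            ListCell *print_owners_lc; \
            foreach (print_owners_lc, (prel)->owners) \
                elog(WARNING, "owner entry %p", (void *) lfirst(print_owners_lc)); \
        } while (0)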
--- src/partition_creation.c | 6 ++---- src/relation_info.c | 18 +++++++++--------- src/runtime_merge_append.c | 6 ++++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index b98163d7..b42372b3 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -2027,11 +2027,9 @@ build_partitioning_expression(Oid parent_relid, /* We need expression type for hash functions */ if (expr_type) { - Node *expr; - expr = cook_partitioning_expression(parent_relid, expr_cstr, NULL); - /* Finally return expression type */ - *expr_type = exprType(expr); + *expr_type = exprType( + cook_partitioning_expression(parent_relid, expr_cstr, NULL)); } if (columns) diff --git a/src/relation_info.c b/src/relation_info.c index 64c04c2f..90e30d0e 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -71,34 +71,34 @@ int prel_resowner_line = 0; #define LeakTrackerAdd(prel) \ do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ + MemoryContext leak_tracker_add_old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ (prel)->owners = \ list_append_unique( \ (prel)->owners, \ list_make2(makeString((char *) prel_resowner_function), \ makeInteger(prel_resowner_line))); \ - MemoryContextSwitchTo(old_mcxt); \ + MemoryContextSwitchTo(leak_tracker_add_old_mcxt); \ \ (prel)->access_total++; \ } while (0) #define LeakTrackerPrint(prel) \ do { \ - ListCell *lc; \ - foreach (lc, (prel)->owners) \ + ListCell *leak_tracker_print_lc; \ + foreach (leak_tracker_print_lc, (prel)->owners) \ { \ - char *fun = strVal(linitial(lfirst(lc))); \ - int line = intVal(lsecond(lfirst(lc))); \ + char *fun = strVal(linitial(lfirst(leak_tracker_print_lc))); \ + int line = intVal(lsecond(lfirst(leak_tracker_print_lc))); \ elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); \ } \ } while (0) #define LeakTrackerFree(prel) \ do { \ - ListCell *lc; \ - foreach (lc, (prel)->owners) \ + ListCell *leak_tracker_free_lc; \ + foreach (leak_tracker_free_lc, (prel)->owners) \ { \ - list_free_deep(lfirst(lc)); \ + list_free_deep(lfirst(leak_tracker_free_lc)); \ } \ list_free((prel)->owners); \ (prel)->owners = NIL; \ diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 601c663f..5edd803c 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -374,7 +374,8 @@ fetch_next_tuple(CustomScanState *node) for (i = 0; i < scan_state->rstate.ncur_plans; i++) { ChildScanCommon child = scan_state->rstate.cur_plans[i]; - PlanState *ps = child->content.plan_state; + + ps = child->content.plan_state; Assert(child->content_type == CHILD_PLAN_STATE); @@ -721,10 +722,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys, foreach(j, ec->ec_members) { - EquivalenceMember *em = (EquivalenceMember *) lfirst(j); List *exprvars; ListCell *k; + em = (EquivalenceMember *) lfirst(j); + /* * We shouldn't be trying to sort by an equivalence class that * contains a constant, so no need to consider such cases any From a9e82f4cee1d675af19f26aeaca035e5d3ba6c65 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 16 May 2022 22:13:54 +0300 Subject: [PATCH 483/528] [PGPRO-4997] Integrated two vanilla commits for EXPLAIN correction (v13+) 1) ce76c0ba - Add a reverse-translation column number array to struct AppendRelInfo. 2) 55a1954d - Fix EXPLAIN's column alias output for mismatched child tables. 
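
The visible effect, sketched from the new expected files below (query and
output taken from pathman_array_qual_2.out): since 55a1954d, EXPLAIN prints
both the child relation actually scanned and the query's original alias when
the two differ, so a pruned scan over the partitioned parent now reads:

    EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4);
    --  Seq Scan on test_1 test        <- child "test_1", query alias "test"
    --    Filter: (a = ANY ('{1,2,3,4}'::integer[]))

Older expected files show only the child name; that mismatch is why the new
_2/_3/_4 variants of the expected output files are added here.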
--- expected/pathman_array_qual_2.out | 2398 ++++++++++++++++++++++++++ expected/pathman_basic_2.out | 364 ++-- expected/pathman_calamity_2.out | 48 +- expected/pathman_calamity_3.out | 48 +- expected/pathman_cte_2.out | 10 +- expected/pathman_cte_3.out | 266 +++ expected/pathman_domains_1.out | 131 ++ expected/pathman_expressions_3.out | 436 +++++ expected/pathman_gaps_2.out | 819 +++++++++ expected/pathman_hashjoin_4.out | 6 +- expected/pathman_hashjoin_5.out | 2 +- expected/pathman_inserts_2.out | 114 +- expected/pathman_join_clause_2.out | 4 +- expected/pathman_join_clause_3.out | 182 ++ expected/pathman_lateral_2.out | 32 +- expected/pathman_mergejoin_4.out | 10 +- expected/pathman_mergejoin_5.out | 2 +- expected/pathman_only_2.out | 94 +- expected/pathman_rowmarks_3.out | 120 +- expected/pathman_subpartitions_2.out | 461 +++++ expected/pathman_upd_del_3.out | 462 +++++ expected/pathman_views_3.out | 98 +- expected/pathman_views_4.out | 191 ++ src/include/pathman.h | 3 +- src/partition_creation.c | 2 +- src/partition_filter.c | 2 +- src/pg_pathman.c | 91 +- src/planner_tree_modification.c | 2 +- 28 files changed, 5916 insertions(+), 482 deletions(-) create mode 100644 expected/pathman_array_qual_2.out create mode 100644 expected/pathman_cte_3.out create mode 100644 expected/pathman_domains_1.out create mode 100644 expected/pathman_expressions_3.out create mode 100644 expected/pathman_gaps_2.out create mode 100644 expected/pathman_join_clause_3.out create mode 100644 expected/pathman_subpartitions_2.out create mode 100644 expected/pathman_upd_del_3.out create mode 100644 expected/pathman_views_4.out diff --git a/expected/pathman_array_qual_2.out b/expected/pathman_array_qual_2.out new file mode 100644 index 00000000..ab504858 --- /dev/null +++ b/expected/pathman_array_qual_2.out @@ -0,0 +1,2398 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY ('{a,b}'::text[])) +(5 rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> 
Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY 
('{NULL,NULL,NULL,NULL}'::integer[])) +(21 rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > 500) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_2 test_1 + Filter: (a > 101) + -> Seq Scan on test_3 test_2 + -> Seq Scan on test_4 test_3 + -> Seq Scan on test_5 test_4 + -> Seq Scan on test_6 test_5 + -> Seq Scan on test_7 test_6 + -> Seq Scan on test_8 test_7 + -> Seq Scan on test_9 test_8 + -> Seq Scan on test_10 test_9 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 550) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > 700) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on 
test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan 
on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, 
$1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, $1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | 
b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 
test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL 
(ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> 
Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + 
+EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan 
on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS 
OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out index 7cfde8a6..ec180fdb 100644 --- a/expected/pathman_basic_2.out +++ b/expected/pathman_basic_2.out @@ -36,13 +36,13 @@ SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_dat (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN -------------------------------------------- + QUERY PLAN +----------------------------------------- Append -> Seq Scan on hash_rel hash_rel_1 - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 hash_rel_1_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 (5 rows) SELECT * FROM test.hash_rel; @@ -60,12 +60,12 @@ SELECT pathman.set_enable_parent('test.hash_rel', false); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------- + QUERY PLAN +----------------------------------------- Append - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) SELECT * FROM test.hash_rel; @@ -80,13 +80,13 @@ SELECT pathman.set_enable_parent('test.hash_rel', true); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN -------------------------------------------- + QUERY PLAN +----------------------------------------- Append -> Seq Scan on hash_rel hash_rel_1 - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 hash_rel_1_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 (5 rows) SELECT * FROM test.hash_rel; @@ -224,12 +224,12 @@ SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ EXPLAIN (COSTS OFF) SELECT * FROM 
test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +------------------------------------------------------ Append -> Seq Scan on improved_dummy_1 Filter: ((id = 5) AND (name = 'ib'::text)) - -> Seq Scan on improved_dummy_11 + -> Seq Scan on improved_dummy_11 improved_dummy_2 Filter: (id = 101) (5 rows) @@ -245,9 +245,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A Append -> Seq Scan on improved_dummy improved_dummy_1 Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) - -> Seq Scan on improved_dummy_1 improved_dummy_1_1 + -> Seq Scan on improved_dummy_1 improved_dummy_2 Filter: ((id = 5) AND (name = 'ib'::text)) - -> Seq Scan on improved_dummy_11 + -> Seq Scan on improved_dummy_11 improved_dummy_3 Filter: (id = 101) (7 rows) @@ -259,9 +259,9 @@ SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable paren ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; - QUERY PLAN -------------------------------- - Seq Scan on improved_dummy_11 + QUERY PLAN +---------------------------------------------- + Seq Scan on improved_dummy_11 improved_dummy Filter: (id = 101) (2 rows) @@ -277,7 +277,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A Append -> Seq Scan on improved_dummy improved_dummy_1 Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) - -> Seq Scan on improved_dummy_11 + -> Seq Scan on improved_dummy_11 improved_dummy_2 Filter: (id = 101) (5 rows) @@ -389,16 +389,16 @@ EXPLAIN (COSTS OFF) INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select WHERE val <= 80; - QUERY PLAN ---------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------- Insert on insert_into_select_copy -> Append -> Seq Scan on insert_into_select insert_into_select_1 Filter: (val <= 80) - -> Seq Scan on insert_into_select_1 insert_into_select_1_1 - -> Seq Scan on insert_into_select_2 - -> Seq Scan on insert_into_select_3 - -> Seq Scan on insert_into_select_4 + -> Seq Scan on insert_into_select_1 insert_into_select_2 + -> Seq Scan on insert_into_select_2 insert_into_select_3 + -> Seq Scan on insert_into_select_3 insert_into_select_4 + -> Seq Scan on insert_into_select_4 insert_into_select_5 Filter: (val <= 80) (9 rows) @@ -418,12 +418,12 @@ SET enable_indexscan = OFF; SET enable_bitmapscan = OFF; SET enable_seqscan = ON; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------- + QUERY PLAN +----------------------------------------- Append - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; @@ -441,16 +441,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (value = 2) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN 
------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (2 = value) (2 rows) @@ -465,45 +465,45 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN ------------------------------ - Seq Scan on num_range_rel_3 + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_3 num_range_rel Filter: (2500 = id) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_3 num_range_rel_1 Filter: (2500 < id) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_3 num_range_rel_1 Filter: (id > 2500) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_2 - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_2 num_range_rel_1 Filter: (id >= 1500) - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_3 num_range_rel_2 Filter: (id < 2500) (5 rows) @@ -524,35 +524,35 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; QUERY PLAN -------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_2 range_rel_1 Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ QUERY PLAN -------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_2 range_rel_1 Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------- - Seq Scan on range_rel_2 + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN 
--------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_2 range_rel_1 Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_3 range_rel_2 Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (5 rows) @@ -573,12 +573,12 @@ SET enable_indexscan = ON; SET enable_bitmapscan = OFF; SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------- + QUERY PLAN +----------------------------------------- Append - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; @@ -596,16 +596,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (value = 2) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (2 = value) (2 rows) @@ -620,45 +620,45 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN ----------------------------------------------------------- - Index Scan using num_range_rel_3_pkey on num_range_rel_3 + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel Index Cond: (id = 2500) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 Index Cond: (id > 2500) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 Index Cond: (id > 2500) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_2 - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; - QUERY PLAN 
----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 num_range_rel_1 Index Cond: (id >= 1500) - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_2 Index Cond: (id < 2500) (5 rows) @@ -699,35 +699,35 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; QUERY PLAN ------------------------------------------------------------------------------------ Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ QUERY PLAN ------------------------------------------------------------------------------------ Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------- - Seq Scan on range_rel_2 + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 range_rel_2 Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (5 rows) @@ -774,7 +774,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER B QUERY PLAN ------------------------------------- Sort - Sort Key: range_rel_1.dt + Sort Key: range_rel.dt -> Append -> Seq Scan on range_rel_1 -> Seq Scan on range_rel_2 @@ -823,18 +823,18 @@ CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $ select * from test.sql_inline where id = i_id limit 1; $$ LANGUAGE sql STABLE; EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); - QUERY PLAN --------------------------------- + QUERY PLAN +------------------------------------------- Limit - -> Seq Scan on sql_inline_0 + -> Seq Scan on sql_inline_0 sql_inline Filter: (id = 5) (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); - QUERY PLAN --------------------------------- + QUERY PLAN +------------------------------------------- Limit - -> Seq Scan on sql_inline_2 + -> Seq Scan on sql_inline_2 sql_inline Filter: (id = 1) (3 rows) @@ -876,12 +876,12 @@ SELECT pathman.split_range_partition('test.num_range_rel_1', 500); (1 row) 
EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 Index Cond: (id >= 100) - -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 num_range_rel_2 Index Cond: (id <= 700) (5 rows) @@ -907,9 +907,9 @@ SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_re (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - QUERY PLAN ----------------------------------------------------------- - Index Scan using num_range_rel_1_pkey on num_range_rel_1 + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_1_pkey on num_range_rel_1 num_range_rel Index Cond: ((id >= 100) AND (id <= 700)) (2 rows) @@ -927,9 +927,9 @@ SELECT pathman.append_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; - QUERY PLAN ------------------------------ - Seq Scan on num_range_rel_6 + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_6 num_range_rel (1 row) SELECT pathman.prepend_range_partition('test.num_range_rel'); @@ -939,9 +939,9 @@ SELECT pathman.prepend_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; - QUERY PLAN ------------------------------ - Seq Scan on num_range_rel_7 + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_7 num_range_rel (1 row) SELECT pathman.drop_range_partition('test.num_range_rel_7'); @@ -995,9 +995,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' A QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_7_dt_idx on range_rel_7 + -> Index Scan using range_rel_7_dt_idx on range_rel_7 range_rel_1 Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (5 rows) @@ -1010,7 +1010,7 @@ SELECT pathman.drop_range_partition('test.range_rel_7'); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; QUERY PLAN ------------------------------------------------------------------------------- - Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (2 rows) @@ -1026,9 +1026,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' A QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_8_dt_idx on range_rel_8 + -> Index Scan using range_rel_8_dt_idx on range_rel_8 range_rel_1 Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) 
(5 rows) @@ -1045,10 +1045,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive range_rel_1 Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) - -> Seq Scan on range_rel_8 - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Seq Scan on range_rel_8 range_rel_2 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_3 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (6 rows) @@ -1062,8 +1062,8 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_8 - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Seq Scan on range_rel_8 range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (4 rows) @@ -1120,19 +1120,19 @@ SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::RE INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; - QUERY PLAN --------------------------------------------- + QUERY PLAN +-------------------------------------------------------- Append - -> Seq Scan on range_rel_minus_infinity - -> Seq Scan on range_rel_8 + -> Seq Scan on range_rel_minus_infinity range_rel_1 + -> Seq Scan on range_rel_8 range_rel_2 (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; - QUERY PLAN -------------------------------------------- + QUERY PLAN +------------------------------------------------------- Append - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_plus_infinity + -> Seq Scan on range_rel_6 range_rel_1 + -> Seq Scan on range_rel_plus_infinity range_rel_2 (3 rows) /* @@ -1199,12 +1199,12 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') /* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------------ + QUERY PLAN +---------------------------------------------- Append - -> Seq Scan on hash_rel_extern - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) SELECT parent, partition, parttype @@ -1247,12 +1247,12 @@ CREATE TABLE test.hash_rel_wrong( SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); ERROR: column "value" in child table must be marked NOT NULL EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------------ + QUERY PLAN +---------------------------------------------- Append - -> Seq Scan on hash_rel_extern - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) /* @@ -1350,7 +1350,7 @@ SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = 
'2014-12-15'; QUERY PLAN -------------------------------------------------------------------------- - Seq Scan on range_rel_14 + Seq Scan on range_rel_14 range_rel Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) (2 rows) @@ -1363,7 +1363,7 @@ SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; QUERY PLAN -------------------------------------------------------------------------- - Seq Scan on range_rel_8 + Seq Scan on range_rel_8 range_rel Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (2 rows) @@ -1532,14 +1532,14 @@ SELECT create_hash_partitions('test.hash_rel', 'value', 3); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------- Append - -> Index Scan using hash_rel_0_pkey on hash_rel_0 + -> Index Scan using hash_rel_0_pkey on hash_rel_0 hash_rel_1 Index Cond: (id = 1234) - -> Index Scan using hash_rel_1_pkey on hash_rel_1 + -> Index Scan using hash_rel_1_pkey on hash_rel_1 hash_rel_2 Index Cond: (id = 1234) - -> Index Scan using hash_rel_2_pkey on hash_rel_2 + -> Index Scan using hash_rel_2_pkey on hash_rel_2 hash_rel_3 Index Cond: (id = 1234) (7 rows) @@ -1580,21 +1580,21 @@ SELECT prepend_range_partition('test.range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; - QUERY PLAN --------------------------------- + QUERY PLAN +-------------------------------------------- Append - -> Seq Scan on range_rel_15 - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_13 + -> Seq Scan on range_rel_15 range_rel_1 + -> Seq Scan on range_rel_1 range_rel_2 + -> Seq Scan on range_rel_13 range_rel_3 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; QUERY PLAN -------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_12 + -> Seq Scan on range_rel_12 range_rel_1 Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) - -> Seq Scan on range_rel_14 + -> Seq Scan on range_rel_14 range_rel_2 (4 rows) /* Create range partitions from whole range */ @@ -1682,14 +1682,14 @@ SELECT set_enable_parent('test.special_case_1_ind_o_s', true); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- Append -> Seq Scan on special_case_1_ind_o_s special_case_1_ind_o_s_1 Filter: ((val < 75) AND (comment = 'a'::text)) - -> Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_1_1 + -> Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_2 Filter: (comment = 'a'::text) - -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 special_case_1_ind_o_s_3 Index Cond: ((val < 75) AND (comment = 'a'::text)) (7 rows) @@ -1757,18 +1757,18 @@ SELECT set_enable_parent('test.index_on_childs', true); VACUUM ANALYZE test.index_on_childs; EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; - QUERY PLAN 
------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------ Append -> Index Scan using index_on_childs_c2_idx on index_on_childs index_on_childs_1 Index Cond: (c2 = 500) Filter: ((c1 > 100) AND (c1 < 2500)) - -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k index_on_childs_2 Index Cond: (c2 = 500) Filter: (c1 > 100) - -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k index_on_childs_3 Index Cond: (c2 = 500) - -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k index_on_childs_4 Index Cond: (c2 = 500) Filter: (c1 < 2500) (12 rows) diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index f647e788..5bb1053f 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -603,25 +603,25 @@ NOTICE: merging column "val" with inherited definition SELECT add_to_pathman_config('calamity.part_test', 'val'); ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition @@ -630,13 +630,13 @@ CHECK (val = 1 OR val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; @@ -646,13 +646,13 @@ CHECK (val >= 10 AND val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - 
QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out index f64a5f8b..bfb3b63c 100644 --- a/expected/pathman_calamity_3.out +++ b/expected/pathman_calamity_3.out @@ -607,25 +607,25 @@ NOTICE: merging column "val" with inherited definition SELECT add_to_pathman_config('calamity.part_test', 'val'); ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition @@ -634,13 +634,13 @@ CHECK (val = 1 OR val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; @@ -650,13 +650,13 @@ CHECK (val >= 10 AND val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out 
index 6b64ad42..b9bf8730 100644 --- a/expected/pathman_cte_2.out +++ b/expected/pathman_cte_2.out @@ -29,8 +29,8 @@ SELECT * FROM ttt; QUERY PLAN -------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (4 rows) @@ -52,9 +52,9 @@ SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); EXPLAIN (COSTS OFF) WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) SELECT * FROM ttt; - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (value = 2) (2 rows) diff --git a/expected/pathman_cte_3.out b/expected/pathman_cte_3.out new file mode 100644 index 00000000..a7f3acd0 --- /dev/null +++ b/expected/pathman_cte_3.out @@ -0,0 +1,266 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, 
current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_domains_1.out b/expected/pathman_domains_1.out new file mode 100644 index 00000000..aaa0867f --- /dev/null +++ b/expected/pathman_domains_1.out @@ -0,0 +1,131 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA domains; +CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); +CREATE TABLE domains.dom_table(val domains.dom_test NOT NULL); +INSERT INTO domains.dom_table SELECT generate_series(1, 999); +SELECT create_range_partitions('domains.dom_table', 'val', 1, 100); + create_range_partitions 
+------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 250; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_1 + -> Seq Scan on dom_table_2 + -> Seq Scan on dom_table_3 + Filter: ((val)::numeric < '250'::numeric) +(5 rows) + +INSERT INTO domains.dom_table VALUES(1500); +ERROR: value for domain domains.dom_test violates check constraint "dom_test_check" +INSERT INTO domains.dom_table VALUES(-10); +SELECT append_range_partition('domains.dom_table'); + append_range_partition +------------------------ + domains.dom_table_12 +(1 row) + +SELECT prepend_range_partition('domains.dom_table'); + prepend_range_partition +------------------------- + domains.dom_table_13 +(1 row) + +SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); + merge_range_partitions +------------------------ + domains.dom_table_1 +(1 row) + +SELECT split_range_partition('domains.dom_table_1', 50); + split_range_partition +----------------------- + domains.dom_table_14 +(1 row) + +INSERT INTO domains.dom_table VALUES(1101); +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 450; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_13 dom_table_1 + -> Seq Scan on dom_table_11 dom_table_2 + -> Seq Scan on dom_table_1 dom_table_3 + -> Seq Scan on dom_table_14 dom_table_4 + -> Seq Scan on dom_table_3 dom_table_5 + -> Seq Scan on dom_table_4 dom_table_6 + -> Seq Scan on dom_table_5 dom_table_7 + Filter: ((val)::numeric < '450'::numeric) +(9 rows) + +SELECT * FROM pathman_partition_list +ORDER BY range_min::INT, range_max::INT; + parent | partition | parttype | expr | range_min | range_max +-------------------+----------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_13 | 2 | val | -199 | -99 + domains.dom_table | domains.dom_table_11 | 2 | val | -99 | 1 + domains.dom_table | domains.dom_table_1 | 2 | val | 1 | 50 + domains.dom_table | domains.dom_table_14 | 2 | val | 50 | 201 + domains.dom_table | domains.dom_table_3 | 2 | val | 201 | 301 + domains.dom_table | domains.dom_table_4 | 2 | val | 301 | 401 + domains.dom_table | domains.dom_table_5 | 2 | val | 401 | 501 + domains.dom_table | domains.dom_table_6 | 2 | val | 501 | 601 + domains.dom_table | domains.dom_table_7 | 2 | val | 601 | 701 + domains.dom_table | domains.dom_table_8 | 2 | val | 701 | 801 + domains.dom_table | domains.dom_table_9 | 2 | val | 801 | 901 + domains.dom_table | domains.dom_table_10 | 2 | val | 901 | 1001 + domains.dom_table | domains.dom_table_12 | 2 | val | 1001 | 1101 + domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 +(14 rows) + +SELECT drop_partitions('domains.dom_table'); +NOTICE: 49 rows copied from domains.dom_table_1 +NOTICE: 100 rows copied from domains.dom_table_3 +NOTICE: 100 rows copied from domains.dom_table_4 +NOTICE: 100 rows copied from domains.dom_table_5 +NOTICE: 100 rows copied from domains.dom_table_6 +NOTICE: 100 rows copied from domains.dom_table_7 +NOTICE: 100 rows copied from domains.dom_table_8 +NOTICE: 100 rows copied from domains.dom_table_9 +NOTICE: 99 rows copied from domains.dom_table_10 +NOTICE: 1 rows copied from domains.dom_table_11 +NOTICE: 0 rows copied from domains.dom_table_12 +NOTICE: 0 rows copied from domains.dom_table_13 +NOTICE: 151 rows copied from domains.dom_table_14 +NOTICE: 1 rows copied from domains.dom_table_15 + drop_partitions +----------------- + 14 
+(1 row)
+
+SELECT create_hash_partitions('domains.dom_table', 'val', 5);
+ create_hash_partitions 
+------------------------
+                      5
+(1 row)
+
+SELECT * FROM pathman_partition_list
+ORDER BY "partition"::TEXT;
+      parent       |      partition      | parttype | expr | range_min | range_max 
+-------------------+---------------------+----------+------+-----------+-----------
+ domains.dom_table | domains.dom_table_0 |        1 | val  |           | 
+ domains.dom_table | domains.dom_table_1 |        1 | val  |           | 
+ domains.dom_table | domains.dom_table_2 |        1 | val  |           | 
+ domains.dom_table | domains.dom_table_3 |        1 | val  |           | 
+ domains.dom_table | domains.dom_table_4 |        1 | val  |           | 
+(5 rows)
+
+DROP TABLE domains.dom_table CASCADE;
+NOTICE:  drop cascades to 5 other objects
+DROP DOMAIN domains.dom_test CASCADE;
+DROP SCHEMA domains;
+DROP EXTENSION pg_pathman CASCADE;
diff --git a/expected/pathman_expressions_3.out b/expected/pathman_expressions_3.out
new file mode 100644
index 00000000..eacb1009
--- /dev/null
+++ b/expected/pathman_expressions_3.out
@@ -0,0 +1,436 @@
+/*
+ * -------------------------------------------
+ * NOTE: This test behaves differently on < 11 because the planner now turns
+ * Row(Const, Const) into just Const of record type, apparently since 3decd150
+ * -------------------------------------------
+ *
+ * Also, since 8edd0e794 (>= 12), Append nodes with a single subplan are eliminated,
+ * causing different output; pathman_expressions_2.out is the updated version.
+ */
+\set VERBOSITY terse
+SET search_path = 'public';
+CREATE EXTENSION pg_pathman;
+CREATE SCHEMA test_exprs;
+/*
+ * Test partitioning expression canonicalization process
+ */
+CREATE TABLE test_exprs.canon(c JSONB NOT NULL);
+SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2);
+ create_range_partitions 
+-------------------------
+                       2
+(1 row)
+
+SELECT expr FROM pathman_config; /* check expression */
+             expr              
+-------------------------------
+ ((c ->> 'key'::text))::bigint
+(1 row)
+
+INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }');
+SELECT *, tableoid::REGCLASS FROM test_exprs.canon;
+            c            |      tableoid      
+------------------------+--------------------
+ {"key": 2, "value": 0} | test_exprs.canon_1
+(1 row)
+
+DROP TABLE test_exprs.canon CASCADE;
+NOTICE:  drop cascades to 3 other objects
+CREATE TABLE test_exprs.canon(val TEXT NOT NULL);
+CREATE SEQUENCE test_exprs.canon_seq;
+SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL);
+ add_to_pathman_config 
+-----------------------
+ t
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b');
+ add_range_partition 
+---------------------
+ test_exprs.canon_1
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c');
+ add_range_partition 
+---------------------
+ test_exprs.canon_2
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd');
+ add_range_partition 
+---------------------
+ test_exprs.canon_3
+(1 row)
+
+SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e');
+ add_range_partition 
+---------------------
+ test_exprs.canon_4
+(1 row)
+
+SELECT expr FROM pathman_config; /* check expression */
+       expr        
+-------------------
+ (val COLLATE "C")
+(1 row)
+
+INSERT INTO test_exprs.canon VALUES ('b');
+SELECT *, tableoid::REGCLASS FROM test_exprs.canon;
+ val |      tableoid      
+-----+--------------------
+ b   | test_exprs.canon_2
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']);
+        QUERY PLAN         
+---------------------------
+ Seq Scan on canon_1 canon
+(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq 
Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT 
public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 hash_rel_3 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 hash_rel_4 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 hash_rel + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ 
+SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 range_rel + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_2.out b/expected/pathman_gaps_2.out new file mode 100644 index 00000000..b229be66 --- /dev/null +++ b/expected/pathman_gaps_2.out @@ -0,0 +1,819 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN 
(COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 test_1_2 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +----------------------------- + Seq Scan on test_2_4 test_2 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + -> Seq Scan on test_2_5 test_2_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + Filter: (val > 11) + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * 
FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + Filter: (val > 31) + -> Seq Scan on test_2_5 test_2_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +----------------------------- + Seq Scan on test_3_5 test_3 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
gaps.test_3 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + -> Seq Scan on test_3_6 test_3_5 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + Filter: (val > 21) + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + Filter: (val > 41) + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +----------------------------- + Seq Scan on test_4_6 test_4 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + 
-> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + -> Seq Scan on test_4_7 test_4_5 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + Filter: (val > 21) + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 
test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + Filter: (val > 51) + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other 
objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out index ef8dfc29..e827628f 100644 --- a/expected/pathman_hashjoin_4.out +++ b/expected/pathman_hashjoin_4.out @@ -54,11 +54,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; QUERY PLAN --------------------------------------------------------------------------------------- Sort - Sort Key: j2_1.dt + Sort Key: j2.dt -> Hash Join - Hash Cond: (j1_1.id = j2_1.id) + Hash Cond: (j1.id = j2.id) -> Hash Join - Hash Cond: (j3_1.id = j1_1.id) + Hash Cond: (j3.id = j1.id) -> Append -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 diff --git a/expected/pathman_hashjoin_5.out b/expected/pathman_hashjoin_5.out index a8f3b6e7..c66a9306 100644 --- a/expected/pathman_hashjoin_5.out +++ b/expected/pathman_hashjoin_5.out @@ -56,7 +56,7 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Sort Sort Key: j2.dt -> Hash Join - Hash Cond: (j3_1.id = j2.id) + Hash Cond: (j3.id = j2.id) -> Append -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out index 91f05753..3c31fc53 100644 --- a/expected/pathman_inserts_2.out +++ b/expected/pathman_inserts_2.out @@ -902,124 +902,124 @@ FROM generate_series(1, 10) i; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e FROM test_inserts.storage; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e -> Result - Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, storage_11.e + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, storage_1.e -> Append - -> Seq Scan on test_inserts.storage_11 - Output: storage_11.b, storage_11.d, storage_11.e - -> Seq Scan on test_inserts.storage_1 storage_1_1 - Output: storage_1_1.b, storage_1_1.d, storage_1_1.e - -> Seq Scan on test_inserts.storage_2 + -> Seq Scan on test_inserts.storage_11 storage_2 Output: storage_2.b, storage_2.d, storage_2.e - -> Seq Scan on test_inserts.storage_3 + -> Seq Scan on test_inserts.storage_1 storage_3 Output: storage_3.b, storage_3.d, storage_3.e - -> Seq Scan on test_inserts.storage_4 + -> Seq Scan on test_inserts.storage_2 storage_4 Output: storage_4.b, storage_4.d, storage_4.e - -> Seq Scan on test_inserts.storage_5 + -> Seq Scan on test_inserts.storage_3 storage_5 Output: storage_5.b, storage_5.d, storage_5.e - -> Seq Scan on test_inserts.storage_6 + -> Seq Scan on test_inserts.storage_4 storage_6 Output: storage_6.b, storage_6.d, storage_6.e - -> Seq Scan on test_inserts.storage_7 + -> Seq Scan on test_inserts.storage_5 storage_7 Output: storage_7.b, storage_7.d, storage_7.e - -> Seq Scan on test_inserts.storage_8 + -> Seq Scan on test_inserts.storage_6 storage_8 Output: storage_8.b, storage_8.d, storage_8.e - -> Seq Scan on test_inserts.storage_9 + -> Seq Scan on test_inserts.storage_7 storage_9 Output: storage_9.b, storage_9.d, storage_9.e - -> Seq Scan on test_inserts.storage_10 + -> Seq Scan on test_inserts.storage_8 storage_10 Output: storage_10.b, 
storage_10.d, storage_10.e - -> Seq Scan on test_inserts.storage_12 + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_10 storage_12 Output: storage_12.b, storage_12.d, storage_12.e - -> Seq Scan on test_inserts.storage_13 + -> Seq Scan on test_inserts.storage_12 storage_13 Output: storage_13.b, storage_13.d, storage_13.e - -> Seq Scan on test_inserts.storage_14 + -> Seq Scan on test_inserts.storage_13 storage_14 Output: storage_14.b, storage_14.d, storage_14.e + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d, storage_15.e (34 rows) EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b, d) SELECT b, d FROM test_inserts.storage; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint -> Result - Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, NULL::bigint -> Append - -> Seq Scan on test_inserts.storage_11 - Output: storage_11.b, storage_11.d - -> Seq Scan on test_inserts.storage_1 storage_1_1 - Output: storage_1_1.b, storage_1_1.d - -> Seq Scan on test_inserts.storage_2 + -> Seq Scan on test_inserts.storage_11 storage_2 Output: storage_2.b, storage_2.d - -> Seq Scan on test_inserts.storage_3 + -> Seq Scan on test_inserts.storage_1 storage_3 Output: storage_3.b, storage_3.d - -> Seq Scan on test_inserts.storage_4 + -> Seq Scan on test_inserts.storage_2 storage_4 Output: storage_4.b, storage_4.d - -> Seq Scan on test_inserts.storage_5 + -> Seq Scan on test_inserts.storage_3 storage_5 Output: storage_5.b, storage_5.d - -> Seq Scan on test_inserts.storage_6 + -> Seq Scan on test_inserts.storage_4 storage_6 Output: storage_6.b, storage_6.d - -> Seq Scan on test_inserts.storage_7 + -> Seq Scan on test_inserts.storage_5 storage_7 Output: storage_7.b, storage_7.d - -> Seq Scan on test_inserts.storage_8 + -> Seq Scan on test_inserts.storage_6 storage_8 Output: storage_8.b, storage_8.d - -> Seq Scan on test_inserts.storage_9 + -> Seq Scan on test_inserts.storage_7 storage_9 Output: storage_9.b, storage_9.d - -> Seq Scan on test_inserts.storage_10 + -> Seq Scan on test_inserts.storage_8 storage_10 Output: storage_10.b, storage_10.d - -> Seq Scan on test_inserts.storage_12 + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d + -> Seq Scan on test_inserts.storage_10 storage_12 Output: storage_12.b, storage_12.d - -> Seq Scan on test_inserts.storage_13 + -> Seq Scan on test_inserts.storage_12 storage_13 Output: storage_13.b, storage_13.d - -> Seq Scan on test_inserts.storage_14 + -> Seq Scan on test_inserts.storage_13 storage_14 Output: storage_14.b, storage_14.d + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d (34 rows) EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b) SELECT b FROM test_inserts.storage; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) Output: 
NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Result - Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint + Output: NULL::integer, storage_1.b, NULL::integer, NULL::text, NULL::bigint -> Append - -> Seq Scan on test_inserts.storage_11 - Output: storage_11.b - -> Seq Scan on test_inserts.storage_1 storage_1_1 - Output: storage_1_1.b - -> Seq Scan on test_inserts.storage_2 + -> Seq Scan on test_inserts.storage_11 storage_2 Output: storage_2.b - -> Seq Scan on test_inserts.storage_3 + -> Seq Scan on test_inserts.storage_1 storage_3 Output: storage_3.b - -> Seq Scan on test_inserts.storage_4 + -> Seq Scan on test_inserts.storage_2 storage_4 Output: storage_4.b - -> Seq Scan on test_inserts.storage_5 + -> Seq Scan on test_inserts.storage_3 storage_5 Output: storage_5.b - -> Seq Scan on test_inserts.storage_6 + -> Seq Scan on test_inserts.storage_4 storage_6 Output: storage_6.b - -> Seq Scan on test_inserts.storage_7 + -> Seq Scan on test_inserts.storage_5 storage_7 Output: storage_7.b - -> Seq Scan on test_inserts.storage_8 + -> Seq Scan on test_inserts.storage_6 storage_8 Output: storage_8.b - -> Seq Scan on test_inserts.storage_9 + -> Seq Scan on test_inserts.storage_7 storage_9 Output: storage_9.b - -> Seq Scan on test_inserts.storage_10 + -> Seq Scan on test_inserts.storage_8 storage_10 Output: storage_10.b - -> Seq Scan on test_inserts.storage_12 + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_10 storage_12 Output: storage_12.b - -> Seq Scan on test_inserts.storage_13 + -> Seq Scan on test_inserts.storage_12 storage_13 Output: storage_13.b - -> Seq Scan on test_inserts.storage_14 + -> Seq Scan on test_inserts.storage_13 storage_14 Output: storage_14.b + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b (34 rows) /* test gap case (missing partition in between) */ diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out index a1fae839..df2ea0a5 100644 --- a/expected/pathman_join_clause_2.out +++ b/expected/pathman_join_clause_2.out @@ -132,10 +132,10 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); QUERY PLAN ---------------------------------------------------------------------- Nested Loop Left Join - Join Filter: (child_1.parent_id = parent.id) + Join Filter: (child.parent_id = parent.id) -> Seq Scan on parent Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) - -> Seq Scan on child_1 + -> Seq Scan on child_1 child Filter: (owner_id = 3) (6 rows) diff --git a/expected/pathman_join_clause_3.out b/expected/pathman_join_clause_3.out new file mode 100644 index 00000000..80b8de4c --- /dev/null +++ b/expected/pathman_join_clause_3.out @@ -0,0 +1,182 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_join_clause_3.out is the updated version.
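+ * + * Editorial illustration, not part of the regression output: where a pre-12 + * server printed an Append over a lone surviving child, e.g. + *   Append + *     ->  Seq Scan on child_1 + * a server >= 12 elides the single-subplan Append and scans the child + * directly under the parent's alias, e.g. + *   Seq Scan on child_1 child + * which is exactly the alias shift visible in the hunks above.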
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), 
(3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out index df5292f8..e4a64a56 100644 --- a/expected/pathman_lateral_2.out +++ b/expected/pathman_lateral_2.out @@ -32,13 +32,13 @@ select * from t1.id > t2.id and exists(select * from test_lateral.data t where t1.id = t2.id and t.id = t3.id); - QUERY PLAN --------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Nested Loop -> Nested Loop - Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + Join Filter: ((t2.id + t1.id) = t.id) -> HashAggregate - Group Key: t_1.id + Group Key: t.id -> Append -> Seq Scan on data_0 t_1 -> Seq Scan on data_1 t_2 @@ -52,7 +52,7 @@ select * from -> Seq Scan on data_9 t_10 -> Materialize -> Nested Loop - Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + Join Filter: ((t2.id > t1.id) 
AND (t1.id > t2.id) AND (t1.id = t2.id)) -> Append -> Seq Scan on data_0 t2_1 Filter: ((id >= 2) AND (id <= 299)) @@ -97,27 +97,27 @@ select * from -> Seq Scan on data_9 t1_10 Filter: ((id >= 1) AND (id <= 100)) -> Custom Scan (RuntimeAppend) - Prune by: (t_1.id = t3.id) + Prune by: (t.id = t3.id) -> Seq Scan on data_0 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_1 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_2 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_3 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_4 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_5 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_6 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_7 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_8 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_9 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) (84 rows) set enable_hashjoin = on; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out index e2affa74..fc9bc95f 100644 --- a/expected/pathman_mergejoin_4.out +++ b/expected/pathman_mergejoin_4.out @@ -57,17 +57,17 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; QUERY PLAN --------------------------------------------------------------------------------- Sort - Sort Key: j2_1.dt + Sort Key: j2.dt -> Merge Join - Merge Cond: (j2_1.id = j3_1.id) + Merge Cond: (j2.id = j3.id) -> Merge Join - Merge Cond: (j1_1.id = j2_1.id) + Merge Cond: (j1.id = j2.id) -> Merge Append - Sort Key: j1_1.id + Sort Key: j1.id -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 -> Merge Append - Sort Key: j2_1.id + Sort Key: j2.id -> Index Scan using range_rel_2_pkey on range_rel_2 j2_1 -> Index Scan using range_rel_3_pkey on range_rel_3 j2_2 -> Index Scan using range_rel_4_pkey on range_rel_4 j2_3 diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out index 7b607435..b99e40db 100644 --- a/expected/pathman_mergejoin_5.out +++ b/expected/pathman_mergejoin_5.out @@ -59,7 +59,7 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Sort Sort Key: j2.dt -> Merge Join - Merge Cond: (j2.id = j3_1.id) + Merge Cond: (j2.id = j3.id) -> Index Scan using range_rel_2_pkey on range_rel_2 j2 Index Cond: (id IS NOT NULL) -> Append diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out index 63638012..c37dd5f4 100644 --- a/expected/pathman_only_2.out +++ b/expected/pathman_only_2.out @@ -36,16 +36,16 @@ UNION SELECT * FROM test_only.from_only_test; -> Append -> Seq Scan on from_only_test -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on 
from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 (15 rows) /* should be OK */ @@ -55,7 +55,7 @@ UNION SELECT * FROM ONLY test_only.from_only_test; QUERY PLAN ---------------------------------------------------------- HashAggregate - Group Key: from_only_test_1.val + Group Key: from_only_test.val -> Append -> Append -> Seq Scan on from_only_test_1 @@ -76,10 +76,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_only.from_only_test UNION SELECT * FROM test_only.from_only_test UNION SELECT * FROM ONLY test_only.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------- HashAggregate - Group Key: from_only_test_1.val + Group Key: from_only_test.val -> Append -> Append -> Seq Scan on from_only_test_1 @@ -93,17 +93,17 @@ UNION SELECT * FROM ONLY test_only.from_only_test; -> Seq Scan on from_only_test_9 -> Seq Scan on from_only_test_10 -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 - -> Seq Scan on from_only_test from_only_test_12 + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 (26 rows) /* should be OK */ @@ -111,34 +111,34 @@ EXPLAIN (COSTS OFF) SELECT * FROM ONLY test_only.from_only_test UNION SELECT * FROM test_only.from_only_test UNION SELECT * FROM test_only.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------- HashAggregate Group Key: from_only_test.val -> Append -> Seq Scan on from_only_test -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 -> Append - -> Seq Scan on from_only_test_1 
from_only_test_1_2 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 (26 rows) /* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out index c2539d76..af61e5f7 100644 --- a/expected/pathman_rowmarks_3.out +++ b/expected/pathman_rowmarks_3.out @@ -42,17 +42,17 @@ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; /* Simple case (plan) */ EXPLAIN (COSTS OFF) SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; - QUERY PLAN ---------------------------------------- + QUERY PLAN +----------------------------------------------- LockRows -> Sort - Sort Key: first_0.id + Sort Key: first.id -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 (9 rows) /* Simple case (execution) */ @@ -98,20 +98,20 @@ WHERE id = (SELECT id FROM rowmarks.first OFFSET 10 LIMIT 1 FOR UPDATE) FOR SHARE; - QUERY PLAN ---------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------- LockRows InitPlan 1 (returns $1) -> Limit -> LockRows -> Sort - Sort Key: first_0.id + Sort Key: first_1.id -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 first_1_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 -> Custom Scan (RuntimeAppend) Prune by: (first.id = $1) -> Seq Scan on first_0 first @@ -187,19 +187,19 @@ SELECT * FROM rowmarks.first JOIN rowmarks.second USING(id) ORDER BY id FOR UPDATE; - QUERY PLAN ---------------------------------------------------- + QUERY PLAN +----------------------------------------------------- LockRows -> Sort - Sort Key: first_0.id + Sort Key: first.id -> Hash Join - Hash Cond: (first_0.id = second.id) + Hash Cond: (first.id = second.id) -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 -> Hash -> Seq Scan on second (13 rows) @@ -244,53 +244,53 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join 
on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Seq Scan on first_0 + -> Seq Scan on first_0 first Filter: (id = 1) (6 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Update on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id < 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id < 1) - -> Seq Scan on first_2 + -> Seq Scan on first_2 first_3 Filter: (id < 1) - -> Seq Scan on first_3 + -> Seq Scan on first_3 first_4 Filter: (id < 1) - -> Seq Scan on first_4 + -> Seq Scan on first_4 first_5 Filter: (id < 1) (16 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Update on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id = 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id = 2) (10 rows) @@ -298,13 +298,13 @@ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; - QUERY PLAN ---------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Seq Scan on first_0 + -> Seq Scan on first_0 first Filter: (id = 1) (6 rows) @@ -326,53 +326,53 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------- + QUERY PLAN +--------------------------------------- Delete on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Seq Scan on first_0 + -> Seq Scan on first_0 first Filter: (id = 1) (6 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Delete on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id < 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id < 1) - -> Seq Scan on first_2 + -> Seq Scan on first_2 first_3 Filter: (id < 1) - -> Seq Scan on first_3 + -> Seq Scan on first_3 first_4 Filter: (id < 1) - -> Seq Scan on first_4 + -> Seq Scan on first_4 first_5 Filter: (id < 1) (16 rows) EXPLAIN 
(COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Delete on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id = 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id = 2) (10 rows) diff --git a/expected/pathman_subpartitions_2.out b/expected/pathman_subpartitions_2.out new file mode 100644 index 00000000..26eae913 --- /dev/null +++ b/expected/pathman_subpartitions_2.out @@ -0,0 +1,461 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. + */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM 
pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + -> Seq Scan on abc_1_1 abc_3 + -> Seq Scan on abc_1_2 abc_4 + -> Append + -> Seq Scan on abc_2_0 abc_6 + Filter: (a < 150) + -> Seq Scan on abc_2_1 abc_7 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + Filter: (b = 215) + -> Seq Scan on abc_1_1 abc_3 + Filter: (b = 215) + -> Seq Scan on abc_1_2 abc_4 + Filter: (b = 215) + -> Seq Scan on abc_2_1 abc_5 + Filter: (b = 215) + -> Seq Scan on abc_3_2 abc_6 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------- + Seq Scan on abc_3_2 abc + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +------------------------- + Seq Scan on abc_3_2 abc + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE 
subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 
+(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc 
CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del_3.out b/expected/pathman_upd_del_3.out new file mode 100644 index 00000000..70b41e7d --- /dev/null +++ b/expected/pathman_upd_del_3.out @@ -0,0 +1,462 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differently on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead.
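+ * + * For reference, a sketch of that MATERIALIZED option (PG 12+ syntax only, + * deliberately not used by the .sql file so it still runs on older servers): + *   WITH q AS MATERIALIZED (SELECT * FROM test.range_rel r + *                           WHERE r.dt = '2010-01-02') + *   DELETE FROM test.tmp USING q; + * MATERIALIZED restores the old fence, forcing the CTE to be evaluated once + * rather than inlined into the outer DELETE.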
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN 
+-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) 
+ Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on 
range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out index cf5ca58e..ae50bcb3 100644 --- a/expected/pathman_views_3.out +++ b/expected/pathman_views_3.out @@ -39,33 +39,33 @@ on views.abc for each row execute procedure views.disable_modification(); /* Test SELECT */ explain (costs off) select * from views.abc; - QUERY PLAN --------------------------- + QUERY PLAN +---------------------------------- Append - -> Seq Scan on _abc_0 - -> Seq Scan on _abc_1 - -> Seq Scan on _abc_2 - -> Seq Scan on _abc_3 - -> Seq Scan on _abc_4 - -> Seq Scan on _abc_5 - -> Seq Scan on _abc_6 - -> Seq Scan on _abc_7 - -> Seq Scan on _abc_8 - -> Seq Scan on _abc_9 + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 (11 
rows) explain (costs off) select * from views.abc where id = 1; - QUERY PLAN --------------------- - Seq Scan on _abc_0 + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc Filter: (id = 1) (2 rows) explain (costs off) select * from views.abc where id = 1 for update; - QUERY PLAN --------------------------- + QUERY PLAN +------------------------------- LockRows - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc Filter: (id = 1) (3 rows) @@ -93,14 +93,14 @@ insert into views.abc values (1); ERROR: INSERT /* Test UPDATE */ explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; - QUERY PLAN --------------------------------------- + QUERY PLAN +--------------------------------------------- Update on abc -> Result -> Append - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc_1 Filter: (id = 1) - -> Seq Scan on _abc_6 + -> Seq Scan on _abc_6 _abc_2 Filter: (id = 2) (7 rows) @@ -108,14 +108,14 @@ update views.abc set id = 2 where id = 1 or id = 2; ERROR: UPDATE /* Test DELETE */ explain (costs off) delete from views.abc where id = 1 or id = 2; - QUERY PLAN --------------------------------------- + QUERY PLAN +--------------------------------------------- Delete on abc -> Result -> Append - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc_1 Filter: (id = 1) - -> Seq Scan on _abc_6 + -> Seq Scan on _abc_6 _abc_2 Filter: (id = 2) (7 rows) @@ -125,43 +125,43 @@ ERROR: DELETE create view views.abc_union as table views._abc union table views._abc_add; create view views.abc_union_all as table views._abc union all table views._abc_add; explain (costs off) table views.abc_union; - QUERY PLAN --------------------------------------- + QUERY PLAN +---------------------------------------------- HashAggregate - Group Key: _abc_0.id + Group Key: _abc.id -> Append -> Append - -> Seq Scan on _abc_0 - -> Seq Scan on _abc_1 - -> Seq Scan on _abc_2 - -> Seq Scan on _abc_3 - -> Seq Scan on _abc_4 - -> Seq Scan on _abc_5 - -> Seq Scan on _abc_6 - -> Seq Scan on _abc_7 - -> Seq Scan on _abc_8 - -> Seq Scan on _abc_9 + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 -> Seq Scan on _abc_add (15 rows) explain (costs off) select * from views.abc_union where id = 5; - QUERY PLAN ----------------------------------------- + QUERY PLAN +------------------------------------------- Unique -> Sort - Sort Key: _abc_8.id + Sort Key: _abc.id -> Append - -> Seq Scan on _abc_8 + -> Seq Scan on _abc_8 _abc Filter: (id = 5) -> Seq Scan on _abc_add Filter: (id = 5) (8 rows) explain (costs off) table views.abc_union_all; - QUERY PLAN ----------------------------- + QUERY PLAN +------------------------------- Append - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc -> Seq Scan on _abc_1 -> Seq Scan on _abc_2 -> Seq Scan on _abc_3 @@ -175,10 +175,10 @@ explain (costs off) table views.abc_union_all; (12 rows) explain (costs off) select * from views.abc_union_all where id = 5; - QUERY PLAN ----------------------------- + QUERY PLAN +------------------------------- Append - -> Seq Scan on _abc_8 + -> Seq Scan on _abc_8 _abc Filter: (id = 5) -> Seq Scan on _abc_add Filter: (id = 5) diff --git a/expected/pathman_views_4.out b/expected/pathman_views_4.out new file mode 100644 index 00000000..8fde5770 --- /dev/null +++ b/expected/pathman_views_4.out @@ 
-0,0 +1,191 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +------------------------------- + LockRows + -> Seq Scan on _abc_0 _abc + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +---------------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on 
_abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_0 _abc + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/src/include/pathman.h b/src/include/pathman.h index b9acfe59..28f6ef30 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -118,7 +118,8 @@ Index append_child_relation(PlannerInfo *root, * Copied from PostgreSQL (prepunion.c) */ void make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars); + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo); Bitmapset *translate_col_privs(const Bitmapset *parent_privs, List *translated_vars); diff --git a/src/partition_creation.c b/src/partition_creation.c index b42372b3..b2d94794 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -995,7 +995,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) parent_rel = heap_open_compat(parent_relid, NoLock); partition_rel = heap_open_compat(partition_relid, NoLock); - make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars); + make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars, NULL); heap_close_compat(parent_rel, NoLock); heap_close_compat(partition_rel, NoLock); diff --git a/src/partition_filter.c b/src/partition_filter.c index 0ef84e61..3a72a70d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -307,7 +307,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) child_rel = heap_open_compat(partid, NoLock); /* Build Var translation list for 'inserted_cols' */ - make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3b99a7e7..0f150bba 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -507,6 +507,11 @@ append_child_relation(PlannerInfo *root, ListCell *lc1, *lc2; LOCKMODE lockmode; +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + TupleDesc child_tupdesc; + List *parent_colnames; + List *child_colnames; +#endif /* Choose a correct lock mode */ if (parent_rti == root->parse->resultRelation) @@ -538,7 +543,12 @@ append_child_relation(PlannerInfo *root, child_relation = heap_open_compat(child_oid, NoLock); /* Create 
RangeTblEntry for child relation */ +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + child_rte = makeNode(RangeTblEntry); + memcpy(child_rte, parent_rte, sizeof(RangeTblEntry)); +#else child_rte = copyObject(parent_rte); +#endif child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; child_rte->requiredPerms = 0; /* perform all checks on parent */ @@ -560,7 +570,56 @@ append_child_relation(PlannerInfo *root, appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; make_inh_translation_list(parent_relation, child_relation, child_rti, - &appinfo->translated_vars); + &appinfo->translated_vars, appinfo); + +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + /* tablesample is probably null, but copy it */ + child_rte->tablesample = copyObject(parent_rte->tablesample); + + /* + * Construct an alias clause for the child, which we can also use as eref. + * This is important so that EXPLAIN will print the right column aliases + * for child-table columns. (Since ruleutils.c doesn't have any easy way + * to reassociate parent and child columns, we must get the child column + * aliases right to start with. Note that setting childrte->alias forces + * ruleutils.c to use these column names, which it otherwise would not.) + */ + child_tupdesc = RelationGetDescr(child_relation); + parent_colnames = parent_rte->eref->colnames; + child_colnames = NIL; + for (int cattno = 0; cattno < child_tupdesc->natts; cattno++) + { + Form_pg_attribute att = TupleDescAttr(child_tupdesc, cattno); + const char *attname; + + if (att->attisdropped) + { + /* Always insert an empty string for a dropped column */ + attname = ""; + } + else if (appinfo->parent_colnos[cattno] > 0 && + appinfo->parent_colnos[cattno] <= list_length(parent_colnames)) + { + /* Duplicate the query-assigned name for the parent column */ + attname = strVal(list_nth(parent_colnames, + appinfo->parent_colnos[cattno] - 1)); + } + else + { + /* New column, just use its real name */ + attname = NameStr(att->attname); + } + child_colnames = lappend(child_colnames, makeString(pstrdup(attname))); + } + + /* + * We just duplicate the parent's table alias name for each child. If the + * plan gets printed, ruleutils.c has to sort out unique table aliases to + * use, which it can handle. 
+ */ + child_rte->alias = child_rte->eref = makeAlias(parent_rte->eref->aliasname, + child_colnames); +#endif /* Now append 'appinfo' to 'root->append_rel_list' */ root->append_rel_list = lappend(root->append_rel_list, appinfo); @@ -627,6 +686,14 @@ append_child_relation(PlannerInfo *root, child_rte->updatedCols = translate_col_privs(parent_rte->updatedCols, appinfo->translated_vars); } +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + else + { + child_rte->selectedCols = bms_copy(parent_rte->selectedCols); + child_rte->insertedCols = bms_copy(parent_rte->insertedCols); + child_rte->updatedCols = bms_copy(parent_rte->updatedCols); + } +#endif /* Here and below we assume that parent RelOptInfo exists */ AssertState(parent_rel); @@ -1945,7 +2012,8 @@ translate_col_privs(const Bitmapset *parent_privs, */ void make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars) + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo) { List *vars = NIL; TupleDesc old_tupdesc = RelationGetDescr(oldrelation); @@ -1953,6 +2021,17 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, int oldnatts = old_tupdesc->natts; int newnatts = new_tupdesc->natts; int old_attno; +#if PG_VERSION_NUM >= 130000 /* see commit ce76c0ba */ + AttrNumber *pcolnos = NULL; + + if (appinfo) + { + /* Initialize reverse-translation array with all entries zero */ + appinfo->num_child_cols = newnatts; + appinfo->parent_colnos = pcolnos = + (AttrNumber *) palloc0(newnatts * sizeof(AttrNumber)); + } +#endif for (old_attno = 0; old_attno < oldnatts; old_attno++) { @@ -1987,6 +2066,10 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, atttypmod, attcollation, 0)); +#if PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[old_attno] = old_attno + 1; +#endif continue; } @@ -2044,6 +2127,10 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, atttypmod, attcollation, 0)); +#if PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[new_attno] = old_attno + 1; +#endif } *translated_vars = vars; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index b321d9e6..027fd4e1 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -609,7 +609,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) child_rel = heap_open_compat(child, NoLock); parent_rel = heap_open_compat(parent, NoLock); - make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars, NULL); /* Perform some additional adjustments */ if (!inh_translation_list_is_trivial(translated_vars)) From 2b286c48f7e43f8e637a4828b1b809546368db3f Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 09:07:16 +0300 Subject: [PATCH 484/528] Remove AssertArg and AssertState See the commit b1099eca8f38ff5cfaf0901bb91cb6a22f909bc6 (Remove AssertArg and AssertState) in PostgreSQL 16. 
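
For context: AssertArg() and AssertState() have always been thin wrappers
around Assert() that differ only in the assertion type reported by
ExceptionalCondition() ("BadArgument" / "BadState" instead of
"FailedAssertion"), so the substitution below is purely mechanical. An
alternative approach, shown here only as a hedged sketch (the header name
is hypothetical, and this is not the route taken by this patch), would be
a small compatibility shim that re-creates the removed macros when building
against PostgreSQL 16+ headers:

    /* assert_compat.h (hypothetical); include after postgres.h */
    #ifndef AssertArg
    #define AssertArg(condition)    Assert(condition)
    #endif

    #ifndef AssertState
    #define AssertState(condition)  Assert(condition)
    #endif

On pre-16 servers the #ifndef guards make the shim a no-op, since c.h still
defines both macros there. Rewriting the call sites directly, as done below,
mirrors the cited PostgreSQL 16 commit and leaves no extension-local macros
behind.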
--- src/compat/pg_compat.c | 2 +- src/init.c | 12 ++++++------ src/nodes_common.c | 2 +- src/partition_creation.c | 2 +- src/pg_pathman.c | 2 +- src/relation_info.c | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 7afdd99a..216fd382 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -234,7 +234,7 @@ McxtStatsInternal(MemoryContext context, int level, MemoryContextCounters local_totals; MemoryContext child; - AssertArg(MemoryContextIsValid(context)); + Assert(MemoryContextIsValid(context)); /* Examine the context itself */ #if PG_VERSION_NUM >= 140000 diff --git a/src/init.c b/src/init.c index 99b79f55..9f72bcb7 100644 --- a/src/init.c +++ b/src/init.c @@ -569,7 +569,7 @@ find_inheritance_children_array(Oid parent_relid, char * build_check_constraint_name_relid_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return build_check_constraint_name_relname_internal(get_rel_name(relid)); } @@ -580,7 +580,7 @@ build_check_constraint_name_relid_internal(Oid relid) char * build_check_constraint_name_relname_internal(const char *relname) { - AssertArg(relname != NULL); + Assert(relname != NULL); return psprintf("pathman_%s_check", relname); } @@ -591,7 +591,7 @@ build_check_constraint_name_relname_internal(const char *relname) char * build_sequence_name_relid_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return build_sequence_name_relname_internal(get_rel_name(relid)); } @@ -602,7 +602,7 @@ build_sequence_name_relid_internal(Oid relid) char * build_sequence_name_relname_internal(const char *relname) { - AssertArg(relname != NULL); + Assert(relname != NULL); return psprintf("%s_seq", relname); } @@ -613,7 +613,7 @@ build_sequence_name_relname_internal(const char *relname) char * build_update_trigger_name_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return psprintf("%s_upd_trig", get_rel_name(relid)); } @@ -624,7 +624,7 @@ build_update_trigger_name_internal(Oid relid) char * build_update_trigger_func_name_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return psprintf("%s_upd_trig_func", get_rel_name(relid)); } diff --git a/src/nodes_common.c b/src/nodes_common.c index b6bf24cb..a6fecb51 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -59,7 +59,7 @@ transform_plans_into_states(RuntimeAppendState *scan_state, ChildScanCommon child; PlanState *ps; - AssertArg(selected_plans); + Assert(selected_plans); child = selected_plans[i]; /* Create new node since this plan hasn't been used yet */ diff --git a/src/partition_creation.c b/src/partition_creation.c index b2d94794..eb438b91 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -2035,7 +2035,7 @@ build_partitioning_expression(Oid parent_relid, if (columns) { /* Column list should be empty */ - AssertArg(*columns == NIL); + Assert(*columns == NIL); extract_column_names(expr, columns); } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0f150bba..34600249 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -696,7 +696,7 @@ append_child_relation(PlannerInfo *root, #endif /* Here and below we assume that parent RelOptInfo exists */ - AssertState(parent_rel); + Assert(parent_rel); /* Adjust join quals for this child */ child_rel->joininfo = (List *) adjust_appendrel_attrs_compat(root, diff --git a/src/relation_info.c b/src/relation_info.c index 90e30d0e..e3ba540c 100644 --- 
a/src/relation_info.c +++ b/src/relation_info.c @@ -304,7 +304,7 @@ invalidate_psin_entry(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { - AssertArg(prel); + Assert(prel); (void) resowner_prel_del(prel); } From e32efa8bd6bc6159b120326c5128dd7e1419e03b Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 09:11:03 +0300 Subject: [PATCH 485/528] Fix pg_pathman_enable_partition_router initial value Thus it is equal to its boot value. See the commit a73952b795632b2cf5acada8476e7cf75857e9be (Add check on initial and boot values when loading GUCs) in PostgreSQL 16. --- src/partition_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_router.c b/src/partition_router.c index eefc44bf..2e982299 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -63,7 +63,7 @@ -bool pg_pathman_enable_partition_router = true; +bool pg_pathman_enable_partition_router = false; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; From 364d200e647eb41c5b686a87b82c5a86d7a58748 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 09:28:54 +0300 Subject: [PATCH 486/528] Avoid making commutatively-duplicate clauses in EquivalenceClasses. See the commit a5fc46414deb7cbcd4cec1275efac69b9ac10500 (Avoid making commutatively-duplicate clauses in EquivalenceClasses.) in PostgreSQL 16. --- expected/pathman_join_clause_4.out | 161 +++++++++ expected/pathman_lateral_4.out | 128 ++++++++ expected/pathman_only_3.out | 281 ++++++++++++++++ expected/pathman_runtime_nodes_1.out | 468 +++++++++++++++++++++++++++ 4 files changed, 1038 insertions(+) create mode 100644 expected/pathman_join_clause_4.out create mode 100644 expected/pathman_lateral_4.out create mode 100644 expected/pathman_only_3.out create mode 100644 expected/pathman_runtime_nodes_1.out diff --git a/expected/pathman_join_clause_4.out b/expected/pathman_join_clause_4.out new file mode 100644 index 00000000..17791fb9 --- /dev/null +++ b/expected/pathman_join_clause_4.out @@ -0,0 +1,161 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and 
test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral_4.out b/expected/pathman_lateral_4.out new file mode 100644 index 00000000..d35da608 --- /dev/null +++ b/expected/pathman_lateral_4.out @@ -0,0 +1,128 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t3.id = t.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; 
+NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_3.out b/expected/pathman_only_3.out new file mode 100644 index 00000000..2f2fcc75 --- /dev/null +++ b/expected/pathman_only_3.out @@ -0,0 +1,281 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq 
Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: 
(from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out new file mode 100644 index 00000000..65382269 --- /dev/null +++ b/expected/pathman_runtime_nodes_1.out @@ -0,0 +1,468 @@ +\set 
VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test RuntimeAppend + */ +create or replace function test.pathman_assert(smt bool, error_msg text) returns text as $$ +begin + if not smt then + raise exception '%', error_msg; + end if; + + return 'ok'; +end; +$$ language plpgsql; +create or replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ +begin + if a != b then + raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; + end if; + + return 'equal'; +end; +$$ language plpgsql; +create or replace function test.pathman_test(query text) returns jsonb as $$ +declare + plan jsonb; +begin + execute 'explain (analyze, format json)' || query into plan; + + return plan; +end; +$$ language plpgsql; +create or replace function test.pathman_test_1() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, + format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), + 'wrong partition'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; + perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom scan'); + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_2() returns text as $$ +declare + plan jsonb; + num int; + c text; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, '"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + + for i in 0..3 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_equal(num::text, '1', 'expected 1 loop'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_3() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong 
plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); + + for i in 0..5 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 1718 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_4() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.category c, lateral' || + '(select * from test.runtime_test_2 g where g.category_id = c.id order by rating limit 4) as tg'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + /* Limit -> Custom Scan */ + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, + '"RuntimeMergeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + for i in 0..3 loop + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, + format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), + 'wrong partition'); + + num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_5() returns text as $$ +declare + res record; +begin + select + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test empty tlist */ + + + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from test.runtime_test_3 + where id = test.vals.val) as q + into res; /* test lateral */ + + + select id, generate_series(1, 2) gen, val + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + order by id, gen, val + offset 1 limit 1 + into res; /* without IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '1', 'id is incorrect (t2)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); + perform test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); + + + select id + from test.runtime_test_3 + where id = any (select * from test.vals order by val limit 5) + order by id + offset 3 limit 1 + into res; /* with IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '4', 'id is incorrect (t3)'); + + + select v.val v1, generate_series(2, 2) gen, t.val v2 + from test.runtime_test_3 t join test.vals v on 
id = v.val + order by v1, gen, v2 + limit 1 + into res; + + perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); + perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_hashjoin = off +set enable_mergejoin = off; +create table test.run_values as select generate_series(1, 10000) val; +create table test.runtime_test_1(id serial primary key, val real); +insert into test.runtime_test_1 select generate_series(1, 10000), random(); +select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); +create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); +insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); +create index on test.runtime_test_2 (category_id, rating); +select pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.vals as (select generate_series(1, 10000) as val); +create table test.runtime_test_3(val text, id serial not null); +insert into test.runtime_test_3(id, val) select * from generate_series(1, 10000) k, format('k = %s', k); +select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +create index on test.runtime_test_3 (id); +create index on test.runtime_test_3_0 (id); +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); + create_range_partitions +------------------------- + 5 +(1 row) + +VACUUM ANALYZE; +set pg_pathman.enable_runtimeappend = on; +set pg_pathman.enable_runtimemergeappend = on; +select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ + pathman_test_1 +---------------- + ok +(1 row) + +select test.pathman_test_2(); /* RuntimeAppend (select ... 
where id = any(subquery)) */ + pathman_test_2 +---------------- + ok +(1 row) + +select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ + pathman_test_3 +---------------- + ok +(1 row) + +select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ + pathman_test_4 +---------------- + ok +(1 row) + +select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ + pathman_test_5 +---------------- + ok +(1 row) + +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Seq Scan on runtime_test_1 t1 + Filter: (id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(19 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(17 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, additional projections) */ +select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + generate_series +----------------- + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 2 +(8 rows) + +/* RuntimeAppend (select ... 
where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + ?column? +---------- + t +(1 row) + +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! */ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Function Scan on generate_series f + -> Aggregate + -> Custom Scan (RuntimeAppend) + Prune by: (dropped_cols.val = f.id) + -> Seq Scan on dropped_cols_0 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_1 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_2 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_3 dropped_cols + Filter: (val = f.id) +(13 rows) + +drop table test.dropped_cols cascade; +NOTICE: drop cascades to 4 other objects +set enable_hashjoin = off; +set enable_mergejoin = off; +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ +-- +(0 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; From 874412561e9d406547ef04f6ac3cc2d34e8c37f5 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 22 Nov 2022 00:41:16 +0300 Subject: [PATCH 487/528] [PGPRO-7417] Added 'volatile' modifier for local variables that are modified in PG_TRY and read in PG_CATCH/PG_FINALLY --- src/include/init.h | 4 ++-- src/init.c | 4 ++-- src/pl_funcs.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index f2234c8f..58335c46 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -171,8 +171,8 @@ void *pathman_cache_search_relid(HTAB *cache_table, /* * Save and restore PathmanInitState. */ -void save_pathman_init_state(PathmanInitState *temp_init_state); -void restore_pathman_init_state(const PathmanInitState *temp_init_state); +void save_pathman_init_state(volatile PathmanInitState *temp_init_state); +void restore_pathman_init_state(const volatile PathmanInitState *temp_init_state); /* * Create main GUC variables. 
diff --git a/src/init.c b/src/init.c index 9f72bcb7..bdec28fd 100644 --- a/src/init.c +++ b/src/init.c @@ -134,13 +134,13 @@ pathman_cache_search_relid(HTAB *cache_table, */ void -save_pathman_init_state(PathmanInitState *temp_init_state) +save_pathman_init_state(volatile PathmanInitState *temp_init_state) { *temp_init_state = pathman_init_state; } void -restore_pathman_init_state(const PathmanInitState *temp_init_state) +restore_pathman_init_state(const volatile PathmanInitState *temp_init_state) { /* * initialization_needed is not restored: it is not just a setting but diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b638fc47..809884c2 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -796,7 +796,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Oid expr_type; - PathmanInitState init_state; + volatile PathmanInitState init_state; if (!IsPathmanReady()) elog(ERROR, "pg_pathman is disabled"); From 47806e7f69935caaa86f40e87cf215cb90aaf9a3 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 22 Dec 2022 05:10:34 +0300 Subject: [PATCH 488/528] [PGPRO-7585] Fixes for v16 due to vanilla changes Tags: pg_pathman Caused by: - ad86d159b6: Add 'missing_ok' argument to build_attrmap_by_name - a61b1f7482: Rework query relation permission checking - b5d6382496: Provide per-table permissions for vacuum and analyze --- expected/pathman_permissions_1.out | 263 +++++++++++++++++++++++++++++ src/include/partition_filter.h | 8 +- src/partition_filter.c | 68 +++++++- src/pg_pathman.c | 7 + src/pl_funcs.c | 5 +- src/planner_tree_modification.c | 27 +++ src/utility_stmt_hooking.c | 47 +++++- 7 files changed, 410 insertions(+), 15 deletions(-) create mode 100644 expected/pathman_permissions_1.out diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out new file mode 100644 index 00000000..c7e04210 --- /dev/null +++ b/expected/pathman_permissions_1.out @@ -0,0 +1,263 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA permissions; +CREATE ROLE user1 LOGIN; +CREATE ROLE user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +/* Switch to #1 */ +SET ROLE user1; +CREATE TABLE permissions.user1_table(id serial, a int); +INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +/* Should fail (can't SELECT) */ +SET ROLE user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Grant SELECT to user2 */ +SET ROLE user1; +GRANT SELECT ON permissions.user1_table TO user2; +/* Should fail (don't own parent) */ +SET ROLE user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Should be ok */ +SET ROLE user1; +SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +/* Should be able to see */ +SET ROLE user2; +SELECT * FROM pathman_config; + partrel | expr | parttype | range_interval +-------------------------+------+----------+---------------- + permissions.user1_table | id | 2 | 10 +(1 row) + +SELECT * FROM pathman_config_params; + partrel | enable_parent | auto | init_callback | spawn_using_bgw 
+-------------------------+---------------+------+---------------+----------------- + permissions.user1_table | f | t | | f +(1 row) + +/* Should fail */ +SET ROLE user2; +SELECT set_enable_parent('permissions.user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +SELECT set_auto('permissions.user1_table', false); +WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +/* Should fail */ +SET ROLE user2; +DELETE FROM pathman_config +WHERE partrel = 'permissions.user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +/* No rights to insert, should fail */ +SET ROLE user2; +DO $$ +BEGIN + INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); +ERROR: permission denied for parent relation "user1_table" +/* Allow user2 to create partitions */ +SET ROLE user1; +GRANT INSERT ON permissions.user1_table TO user2; +GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +/* Should be able to prepend a partition */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); + prepend_range_partition +--------------------------- + permissions.user1_table_4 +(1 row) + +SELECT attname, attacl FROM pg_attribute +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ + attname | attacl +----------+----------------- + a | {user2=w/user1} + cmax | + cmin | + ctid | + id | + tableoid | + xmax | + xmin | +(8 rows) + +/* Have rights, should be ok (parent's ACL is shared by new children) */ +SET ROLE user2; +INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; + id | a +----+--- + 35 | 0 +(1 row) + +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.user1_table'::REGCLASS + ORDER BY range_max::int DESC /* append */ + LIMIT 3) +ORDER BY relname; /* we also check ACL for "user1_table_2" */ + relname | relacl +---------------+---------------------------------------- + user1_table_2 | {user1=arwdDxtvz/user1,user2=r/user1} + user1_table_5 | {user1=arwdDxtvz/user1,user2=ar/user1} + user1_table_6 | {user1=arwdDxtvz/user1,user2=ar/user1} +(3 rows) + +/* Try to drop partition, should fail */ +DO $$ +BEGIN + SELECT drop_range_partition('permissions.user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Disable automatic partition creation */ +SET ROLE user1; +SELECT set_auto('permissions.user1_table', false); + set_auto +---------- + +(1 row) + +/* Partition creation, should fail */ +SET ROLE user2; +INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +ERROR: no suitable partition for key '55' +/* Finally drop partitions */ +SET ROLE user1; +SELECT drop_partitions('permissions.user1_table'); +NOTICE: 
10 rows copied from permissions.user1_table_1 +NOTICE: 10 rows copied from permissions.user1_table_2 +NOTICE: 0 rows copied from permissions.user1_table_4 +NOTICE: 0 rows copied from permissions.user1_table_5 +NOTICE: 1 rows copied from permissions.user1_table_6 + drop_partitions +----------------- + 5 +(1 row) + +/* Switch to #2 */ +SET ROLE user2; +/* Test ddl event trigger */ +CREATE TABLE permissions.user2_table(id serial); +SELECT create_hash_partitions('permissions.user2_table', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO permissions.user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.user2_table'); +NOTICE: 9 rows copied from permissions.user2_table_0 +NOTICE: 11 rows copied from permissions.user2_table_1 +NOTICE: 10 rows copied from permissions.user2_table_2 + drop_partitions +----------------- + 3 +(1 row) + +/* Switch to #1 */ +SET ROLE user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} +(3 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_4 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} + permissions.dropped_column_4 | val | {user2=ar/user1} +(4 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_5 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} + permissions.dropped_column_4 | val 
| {user2=ar/user1} + permissions.dropped_column_5 | val | {user2=ar/user1} +(5 rows) + +DROP TABLE permissions.dropped_column CASCADE; +NOTICE: drop cascades to 6 other objects +/* Finally reset user */ +RESET ROLE; +DROP OWNED BY user1; +DROP OWNED BY user2; +DROP USER user1; +DROP USER user2; +DROP SCHEMA permissions; +DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 0c912abe..d3c2c482 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -101,6 +101,9 @@ struct ResultPartsStorage PartRelationInfo *prel; ExprState *prel_expr_state; ExprContext *prel_econtext; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + ResultRelInfo *init_rri; /* first initialized ResultRelInfo */ +#endif }; typedef struct @@ -167,7 +170,7 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, void fini_result_parts_storage(ResultPartsStorage *parts_storage); /* Find ResultRelInfo holder in storage */ -ResultRelInfoHolder * scan_result_parts_storage(ResultPartsStorage *storage, Oid partid); +ResultRelInfoHolder * scan_result_parts_storage(EState *estate, ResultPartsStorage *storage, Oid partid); /* Refresh PartRelationInfo in storage */ PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); @@ -186,7 +189,8 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder *select_partition_for_insert(ResultPartsStorage *parts_storage, +ResultRelInfoHolder *select_partition_for_insert(EState *estate, + ResultPartsStorage *parts_storage, TupleTableSlot *slot); Plan * make_partition_filter(Plan *subplan, diff --git a/src/partition_filter.c b/src/partition_filter.c index 3a72a70d..a267c702 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -27,6 +27,9 @@ #include "foreign/fdwapi.h" #include "foreign/foreign.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "rewrite/rewriteManip.h" #include "utils/guc.h" #include "utils/memutils.h" @@ -257,7 +260,8 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) /* Find a ResultRelInfo for the partition using ResultPartsStorage */ ResultRelInfoHolder * -scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) +scan_result_parts_storage(EState *estate, ResultPartsStorage *parts_storage, + Oid partid) { #define CopyToResultRelInfo(field_name) \ ( child_result_rel_info->field_name = parts_storage->base_rri->field_name ) @@ -280,6 +284,12 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) ResultRelInfo *child_result_rel_info; List *translated_vars; MemoryContext old_mcxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; + /* ResultRelInfo of partitioned table. 
*/ + RangeTblEntry *init_rte; +#endif /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); @@ -306,15 +316,41 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) /* Open child relation and check if it is a valid target */ child_rel = heap_open_compat(partid, NoLock); - /* Build Var translation list for 'inserted_cols' */ - make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); - /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); child_rte->rtekind = RTE_RELATION; child_rte->relid = partid; child_rte->relkind = child_rel->rd_rel->relkind; child_rte->eref = parent_rte->eref; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(parts_storage->init_rri->ri_RelationDesc, + child_rel, 0, &translated_vars, NULL); + + /* + * Need to use ResultRelInfo of partitioned table 'init_rri' because + * 'base_rri' can be ResultRelInfo of partition without any + * ResultRelInfo, see expand_single_inheritance_child(). + */ + init_rte = rt_fetch(parts_storage->init_rri->ri_RangeTableIndex, + parts_storage->estate->es_range_table); + parent_perminfo = getRTEPermissionInfo(estate->es_rteperminfos, init_rte); + + child_rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&estate->es_rteperminfos, child_rte); + child_perminfo->requiredPerms = parent_perminfo->requiredPerms; + child_perminfo->checkAsUser = parent_perminfo->checkAsUser; + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, + translated_vars); + child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, + translated_vars); + + /* Check permissions for partition */ + ExecCheckPermissions(list_make1(child_rte), list_make1(child_perminfo), true); +#else + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); + child_rte->requiredPerms = parent_rte->requiredPerms; child_rte->checkAsUser = parent_rte->checkAsUser; child_rte->insertedCols = translate_col_privs(parent_rte->insertedCols, @@ -324,6 +360,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) /* Check permissions for partition */ ExecCheckRTPerms(list_make1(child_rte), true); +#endif /* Append RangeTblEntry to estate->es_range_table */ child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte, child_rel); @@ -498,7 +535,9 @@ build_part_tuple_map_child(Relation child_rel) child_tupdesc2->tdtypeid = InvalidOid; /* Generate tuple transformation map */ -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2, false); +#elif PG_VERSION_NUM >= 130000 attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2); #else attrMap = convert_tuples_by_name_map(child_tupdesc1, child_tupdesc2, @@ -586,7 +625,8 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). 
*/ ResultRelInfoHolder * -select_partition_for_insert(ResultPartsStorage *parts_storage, +select_partition_for_insert(EState *estate, + ResultPartsStorage *parts_storage, TupleTableSlot *slot) { PartRelationInfo *prel = parts_storage->prel; @@ -637,7 +677,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, else partition_relid = parts[0]; /* Get ResultRelationInfo holder for the selected partition */ - result = scan_result_parts_storage(parts_storage, partition_relid); + result = scan_result_parts_storage(estate, parts_storage, partition_relid); /* Somebody has dropped or created partitions */ if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) @@ -837,6 +877,10 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) state->on_conflict_action != ONCONFLICT_NONE, RPS_RRI_CB(prepare_rri_for_insert, state), RPS_RRI_CB(NULL, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. */ + state->result_parts.init_rri = current_rri; +#endif } #if PG_VERSION_NUM >= 140000 @@ -906,7 +950,7 @@ partition_filter_exec(CustomScanState *node) old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(&state->result_parts, slot); + rri_holder = select_partition_for_insert(estate, &state->result_parts, slot); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); @@ -1223,6 +1267,14 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, query.targetList = NIL; query.returningList = NIL; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* + * Copy the RTEPermissionInfos into query as well, so that + * add_rte_to_flat_rtable() will work correctly. 
+ */ + query.rteperminfos = estate->es_rteperminfos; +#endif + /* Generate 'query.targetList' using 'tupdesc' */ target_attr = 1; for (i = 0; i < tupdesc->natts; i++) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 34600249..2e8b1d7e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -551,7 +551,12 @@ append_child_relation(PlannerInfo *root, #endif child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* No permission checking for the child RTE */ + child_rte->perminfoindex = 0; +#else child_rte->requiredPerms = 0; /* perform all checks on parent */ +#endif child_rte->inh = false; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ @@ -676,6 +681,7 @@ append_child_relation(PlannerInfo *root, } +#if PG_VERSION_NUM < 160000 /* for commit a61b1f74823c */ /* Translate column privileges for this child */ if (parent_rte->relid != child_oid) { @@ -694,6 +700,7 @@ append_child_relation(PlannerInfo *root, child_rte->updatedCols = bms_copy(parent_rte->updatedCols); } #endif +#endif /* PG_VERSION_NUM < 160000 */ /* Here and below we assume that parent RelOptInfo exists */ Assert(parent_rel); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 809884c2..542f99ae 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -725,7 +725,10 @@ is_tuple_convertible(PG_FUNCTION_ARGS) rel2 = heap_open_compat(PG_GETARG_OID(1), AccessShareLock); /* Try to build a conversion map */ -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2), false); +#elif PG_VERSION_NUM >= 130000 map = build_attrmap_by_name(RelationGetDescr(rel1), RelationGetDescr(rel2)); #else diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 027fd4e1..d9d64cfd 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -27,6 +27,9 @@ #include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "storage/lmgr.h" #include "utils/syscache.h" @@ -578,6 +581,10 @@ handle_modification_query(Query *parse, transform_query_cxt *context) List *translated_vars; adjust_appendrel_varnos_cxt aav_cxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; +#endif /* Lock 'child' table */ LockRelationOid(child, lockmode); @@ -598,10 +605,24 @@ handle_modification_query(Query *parse, transform_query_cxt *context) return; /* nothing to do here */ } +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + parent_perminfo = getRTEPermissionInfo(parse->rteperminfos, rte); +#endif /* Update RTE's relid and relkind (for FDW) */ rte->relid = child; rte->relkind = child_relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Copy parent RTEPermissionInfo. */ + rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&parse->rteperminfos, rte); + memcpy(child_perminfo, parent_perminfo, sizeof(RTEPermissionInfo)); + + /* Correct RTEPermissionInfo for child. 
*/ + child_perminfo->relid = child; + child_perminfo->inh = false; +#endif + /* HACK: unset the 'inh' flag (no children) */ rte->inh = false; @@ -622,10 +643,16 @@ handle_modification_query(Query *parse, transform_query_cxt *context) aav_cxt.translated_vars = translated_vars; adjust_appendrel_varnos((Node *) parse, &aav_cxt); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + child_perminfo->selectedCols = translate_col_privs(parent_perminfo->selectedCols, translated_vars); + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, translated_vars); + child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, translated_vars); +#else /* Translate column privileges for this child */ rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); +#endif } /* Close relations (should remain locked, though) */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 35786092..d1d9010c 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -26,12 +26,18 @@ #include "access/xact.h" #include "catalog/namespace.h" #include "commands/copy.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "commands/copyfrom_internal.h" +#endif #include "commands/defrem.h" #include "commands/trigger.h" #include "commands/tablecmds.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "nodes/makefuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -414,6 +420,9 @@ PathmanDoCopy(const CopyStmt *stmt, "psql's \\copy command also works for anyone."))); } + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + /* Check that we have a relation */ if (stmt->relation) { @@ -422,6 +431,9 @@ PathmanDoCopy(const CopyStmt *stmt, List *attnums; ListCell *cur; RangeTblEntry *rte; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *perminfo; +#endif Assert(!stmt->query); @@ -432,11 +444,30 @@ PathmanDoCopy(const CopyStmt *stmt, rte->rtekind = RTE_RELATION; rte->relid = RelationGetRelid(rel); rte->relkind = rel->rd_rel->relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + pstate->p_rtable = lappend(pstate->p_rtable, rte); + perminfo = addRTEPermissionInfo(&pstate->p_rteperminfos, rte); + perminfo->requiredPerms = required_access; +#else rte->requiredPerms = required_access; +#endif range_table = list_make1(rte); tupDesc = RelationGetDescr(rel); attnums = PathmanCopyGetAttnums(tupDesc, rel, stmt->attlist); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + foreach(cur, attnums) + { + int attno; + Bitmapset **bms; + + attno = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; + bms = is_from ? 
&perminfo->insertedCols : &perminfo->selectedCols; + + *bms = bms_add_member(*bms, attno); + } + ExecCheckPermissions(pstate->p_rtable, list_make1(perminfo), true); +#else foreach(cur, attnums) { int attnum = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; @@ -447,6 +478,7 @@ PathmanDoCopy(const CopyStmt *stmt, rte->selectedCols = bms_add_member(rte->selectedCols, attnum); } ExecCheckRTPerms(range_table, true); +#endif /* Disable COPY FROM if table has RLS */ if (is_from && check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED) @@ -470,9 +502,6 @@ PathmanDoCopy(const CopyStmt *stmt, /* This should never happen (see is_pathman_related_copy()) */ else elog(ERROR, "error in function " CppAsString(PathmanDoCopy)); - pstate = make_parsestate(NULL); - pstate->p_sourcetext = queryString; - if (is_from) { /* check read-only transaction and parallel mode */ @@ -567,6 +596,16 @@ PathmanCopyFrom( RPS_DEFAULT_SPECULATIVE, RPS_RRI_CB(prepare_rri_for_copy, cstate), RPS_RRI_CB(finish_rri_for_copy, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. */ + parts_storage.init_rri = parent_rri; + + /* + * Copy the RTEPermissionInfos into estate as well, so that + * scan_result_parts_storage() et al will work correctly. + */ + estate->es_rteperminfos = cstate->rteperminfos; +#endif /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlotCompat(estate, NULL, &TTSOpsHeapTuple); @@ -629,7 +668,7 @@ PathmanCopyFrom( #endif /* Search for a matching partition */ - rri_holder = select_partition_for_insert(&parts_storage, slot); + rri_holder = select_partition_for_insert(estate, &parts_storage, slot); child_rri = rri_holder->result_rel_info; /* Magic: replace parent's ResultRelInfo with ours */ From bb9f6e49a7643b77126fb2575a96024bba0ae326 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Tue, 10 Jan 2023 19:13:48 +0300 Subject: [PATCH 489/528] Convert the reg* input functions to report (most) errors softly. See the commit 858e776c84f48841e7e16fba7b690b76e54f3675 (Convert the reg* input functions to report (most) errors softly.) in PostgreSQL 16. The function qualified_relnames_to_rangevars is used in the functions create_hash_partitions_internal and create_range_partitions_internal. It looks like these functions should not skip partition names (e.g. in the functions create_hash_partitions and create_range_partitions respectively).. 
--- src/include/compat/pg_compat.h | 11 +++++++++++ src/utils.c | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 80a76d60..4ae249e6 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -1084,6 +1084,17 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, expression_tree_mutator((node), (mutator), (context)) #endif +/* + * stringToQualifiedNameList + */ +#if PG_VERSION_NUM >= 160000 +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string), NULL) +#else +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string)) +#endif + /* * ------------- * Common code diff --git a/src/utils.c b/src/utils.c index 15552f56..6ebfb8a8 100644 --- a/src/utils.c +++ b/src/utils.c @@ -518,7 +518,7 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) rangevars = palloc(sizeof(RangeVar *) * nrelnames); for (i = 0; i < nrelnames; i++) { - List *nl = stringToQualifiedNameList(relnames[i]); + List *nl = stringToQualifiedNameListCompat(relnames[i]); rangevars[i] = makeRangeVarFromNameList(nl); } From 2d49e88e1cb6c3df338ba82d733e0b2e896d0e15 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Tue, 10 Jan 2023 19:19:57 +0300 Subject: [PATCH 490/528] Add grantable MAINTAIN privilege and pg_maintain role. See the commit 60684dd834a222fefedd49b19d1f0a6189c1632e (Add grantable MAINTAIN privilege and pg_maintain role.) in PostgreSQL 16. Since pathman_permissions_1.out is already in use for PostgreSQL 16+ (see the commit 47806e7f69935caaa86f40e87cf215cb90aaf9a3 [PGPRO-7585] Fixes for v16 due to vanilla changes), do not create pathman_permissions_2.out. --- expected/pathman_permissions_1.out | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out index c7e04210..a50aa524 100644 --- a/expected/pathman_permissions_1.out +++ b/expected/pathman_permissions_1.out @@ -126,11 +126,11 @@ WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list ORDER BY range_max::int DESC /* append */ LIMIT 3) ORDER BY relname; /* we also check ACL for "user1_table_2" */ - relname | relacl ----------------+---------------------------------------- - user1_table_2 | {user1=arwdDxtvz/user1,user2=r/user1} - user1_table_5 | {user1=arwdDxtvz/user1,user2=ar/user1} - user1_table_6 | {user1=arwdDxtvz/user1,user2=ar/user1} + relname | relacl +---------------+--------------------------------------- + user1_table_2 | {user1=arwdDxtm/user1,user2=r/user1} + user1_table_5 | {user1=arwdDxtm/user1,user2=ar/user1} + user1_table_6 | {user1=arwdDxtm/user1,user2=ar/user1} (3 rows) /* Try to drop partition, should fail */ From e939296ebf9ef89c25fec08d9ebd6cbed5f6a9ca Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 24 Jan 2023 13:00:13 +0300 Subject: [PATCH 491/528] README: update versions list and remove obsolete emails --- README.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index d4b8e3bb..43d585ff 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ ### NOTE: this project is not under development anymore -`pg_pathman` supports Postgres versions [9.5..13], but most probably it won't be ported to 14 and later releases. 
[Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. +`pg_pathman` supports Postgres versions [11..15], but most probably it won't be ported to later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. # pg_pathman @@ -13,8 +13,9 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: - * PostgreSQL 9.5, 9.6, 10, 11, 12, 13; - * Postgres Pro Standard 9.5, 9.6, 10, 11, 12; + * PostgreSQL 11, 12, 13; + * PostgreSQL with core-patch: 14, 15; + * Postgres Pro Standard 11, 12, 13, 14, 15; * Postgres Pro Enterprise; Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). @@ -789,7 +790,7 @@ Do not hesitate to post your issues, questions and new ideas at the [issues](htt ## Authors [Ildar Musin](https://github.com/zilder) -Alexander Korotkov Postgres Professional Ltd., Russia +[Alexander Korotkov](https://github.com/akorotkov) [Dmitry Ivanov](https://github.com/funbringer) -Maksim Milyutin Postgres Professional Ltd., Russia +[Maksim Milyutin](https://github.com/maksm90) [Ildus Kurbangaliev](https://github.com/ildus) From db83c707475f263b4814103bed6eeebec3be67f5 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 27 Jan 2023 19:43:22 +0300 Subject: [PATCH 492/528] [PGPRO-7287] New PgproRegisterXactCallback to filter by event kind Tags: pg_pathman --- src/pg_pathman.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3b99a7e7..6457cdca 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -368,9 +368,13 @@ _PG_init(void) init_partition_overseer_static_data(); #if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* Callbacks for reload relcache for ATX transactions */ + PgproRegisterXactCallback(pathman_xact_cb, NULL, XACT_EVENT_KIND_VANILLA | XACT_EVENT_KIND_ATX); +#else RegisterXactCallback(pathman_xact_cb, NULL); #endif +#endif } #if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ From bcf2424f6d6ffe75c240adbcfd54d21d238cc750 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Sat, 4 Feb 2023 00:42:58 +0300 Subject: [PATCH 493/528] [PGPRO-7742] Use PgproRegisterXactCallback for all EE-versions Tags: pg_pathman --- src/pg_pathman.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d902d5d4..94cfce84 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -367,13 +367,9 @@ _PG_init(void) init_partition_router_static_data(); init_partition_overseer_static_data(); -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 -#if PG_VERSION_NUM >= 150000 +#ifdef PGPRO_EE /* Callbacks for reload relcache for ATX transactions */ PgproRegisterXactCallback(pathman_xact_cb, NULL, XACT_EVENT_KIND_VANILLA | XACT_EVENT_KIND_ATX); -#else - RegisterXactCallback(pathman_xact_cb, 
NULL); -#endif #endif } From 96254aa04e5f40e37e8f7e577a04e354eec92571 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 6 Mar 2023 19:37:14 +0300 Subject: [PATCH 494/528] Fix for REL_14_STABLE/REL_15_STABLE diffs --- patches/REL_14_STABLE-pg_pathman-core.diff | 64 ++++++++++---------- patches/REL_15_STABLE-pg_pathman-core.diff | 70 +++++++++++----------- 2 files changed, 67 insertions(+), 67 deletions(-) diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index 751095aa..57576c44 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -11,7 +11,7 @@ index f27e458482..ea47c341c1 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index ca6f6d57d3..8ab313b910 100644 +index bf551b0395..10d2044ae6 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -76,7 +76,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index ca6f6d57d3..8ab313b910 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index 5483dee650..e2864e6ae9 100644 +index 6b63f93e6d..060146d127 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -77,10 +77,10 @@ index b3ce4bae53..8f2bb12542 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index d328856ae5..27235ec869 100644 +index 0780554246..a90f3a495d 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -450,7 +450,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, +@@ -510,7 +510,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, * This is also a convenient place to verify that the output of an UPDATE * matches the target table (ExecBuildUpdateProjection does that). */ @@ -89,15 +89,15 @@ index d328856ae5..27235ec869 100644 ExecInitUpdateProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo) { -@@ -2363,6 +2363,7 @@ ExecModifyTable(PlanState *pstate) - PartitionTupleRouting *proute = node->mt_partition_tuple_routing; - List *relinfos = NIL; - ListCell *lc; +@@ -2487,6 +2487,7 @@ ExecModifyTable(PlanState *pstate) + ItemPointerData tuple_ctid; + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ResultRelInfo *saved_resultRelInfo; CHECK_FOR_INTERRUPTS(); -@@ -2400,12 +2401,23 @@ ExecModifyTable(PlanState *pstate) +@@ -2524,12 +2525,23 @@ ExecModifyTable(PlanState *pstate) resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; subplanstate = outerPlanState(node); @@ -111,7 +111,7 @@ index d328856ae5..27235ec869 100644 for (;;) { + /* -+ * "es_original_tuple" should contain original modified tuple (new ++ * "es_original_tuple" should contains original modified tuple (new + * values of the changed columns plus row identity information such as + * CTID) in case tuple planSlot is replaced in pg_pathman to new value + * in call "ExecProcNode(subplanstate)". @@ -121,7 +121,7 @@ index d328856ae5..27235ec869 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -2439,7 +2451,9 @@ ExecModifyTable(PlanState *pstate) +@@ -2563,7 +2575,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -132,7 +132,7 @@ index d328856ae5..27235ec869 100644 &isNull); if (isNull) elog(ERROR, "tableoid is NULL"); -@@ -2458,6 +2472,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2582,6 +2596,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -141,7 +141,7 @@ index d328856ae5..27235ec869 100644 /* * A scan slot containing the data that was actually inserted, -@@ -2467,6 +2483,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2591,6 +2607,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); @@ -149,7 +149,7 @@ index d328856ae5..27235ec869 100644 return slot; } -@@ -2496,7 +2513,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2620,7 +2637,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -159,7 +159,7 @@ index d328856ae5..27235ec869 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -2526,7 +2544,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2650,7 +2668,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -169,7 +169,7 @@ index d328856ae5..27235ec869 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -2557,8 +2576,12 @@ ExecModifyTable(PlanState *pstate) +@@ -2681,8 +2700,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -184,7 +184,7 @@ index d328856ae5..27235ec869 100644 estate, node->canSetTag); break; case CMD_UPDATE: -@@ -2566,37 +2589,45 @@ ExecModifyTable(PlanState *pstate) +@@ -2690,37 +2713,45 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -253,7 +253,7 @@ index d328856ae5..27235ec869 100644 planSlot, &node->mt_epqstate, estate, true, /* processReturning */ node->canSetTag, -@@ -2613,7 +2644,10 @@ ExecModifyTable(PlanState *pstate) +@@ -2737,7 +2768,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -264,7 +264,7 @@ index d328856ae5..27235ec869 100644 } /* -@@ -2642,6 +2676,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2753,6 +2787,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -272,7 +272,7 @@ index d328856ae5..27235ec869 100644 return NULL; } -@@ -2716,6 +2751,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2827,6 +2862,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -280,7 +280,7 @@ index d328856ae5..27235ec869 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -2812,6 +2848,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2923,6 +2959,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -294,8 +294,8 @@ index d328856ae5..27235ec869 100644 /* * Now we may initialize the subplan. 
*/ -@@ -2884,6 +2927,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) - } +@@ -3004,6 +3047,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); } + estate->es_result_relation_info = saved_resultRelInfo; @@ -304,7 +304,7 @@ index d328856ae5..27235ec869 100644 * If this is an inherited update/delete, there will be a junk attribute * named "tableoid" present in the subplan's targetlist. It will be used diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c -index 381d9e548d..9d101c3a86 100644 +index 381d9e548d..0a4657d291 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -25,7 +25,7 @@ @@ -317,7 +317,7 @@ index 381d9e548d..9d101c3a86 100644 volatile sig_atomic_t InterruptPending = false; volatile sig_atomic_t QueryCancelPending = false; diff --git a/src/include/access/xact.h b/src/include/access/xact.h -index 134f6862da..92ff475332 100644 +index 5af78bd0dc..0c13bc9d83 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -53,7 +53,9 @@ extern PGDLLIMPORT int XactIsoLevel; @@ -357,7 +357,7 @@ index 3dc03c913e..1002d97499 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h -index 02015efe13..2091f7f3b7 100644 +index 4acb1cda6e..fd8d38347d 100644 --- a/src/include/libpq/libpq-be.h +++ b/src/include/libpq/libpq-be.h @@ -327,7 +327,7 @@ extern ssize_t be_gssapi_read(Port *port, void *ptr, size_t len); @@ -370,10 +370,10 @@ index 02015efe13..2091f7f3b7 100644 /* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index 105180764e..2a40d2ce15 100644 +index ee5ad3c058..dc474819d7 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h -@@ -579,6 +579,12 @@ typedef struct EState +@@ -592,6 +592,12 @@ typedef struct EState * es_result_relations in no * specific order */ @@ -419,7 +419,7 @@ index de22c9ba2c..c8be5323b8 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index 05ff67e693..d169271df1 100644 +index 9b6539fb15..f8a67c6701 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -41,7 +41,10 @@ my @contrib_uselibpq = @@ -434,7 +434,7 @@ index 05ff67e693..d169271df1 100644 my $contrib_extrasource = { 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ], 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], -@@ -970,6 +973,7 @@ sub AddContrib +@@ -973,6 +976,7 @@ sub AddContrib my $dn = $1; my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); $proj->AddReference($postgres); @@ -442,7 +442,7 @@ index 05ff67e693..d169271df1 100644 AdjustContribProj($proj); } elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg) -@@ -999,6 +1003,19 @@ sub AddContrib +@@ -1002,6 +1006,19 @@ sub AddContrib return; } @@ -462,7 +462,7 @@ index 05ff67e693..d169271df1 100644 sub GenerateContribSqlFiles { my $n = shift; -@@ -1023,23 +1040,53 @@ sub GenerateContribSqlFiles +@@ -1026,23 +1043,53 @@ sub GenerateContribSqlFiles substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); } diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index e0eb9a62..3d72d2e7 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -1,5 +1,5 @@ diff --git a/contrib/Makefile b/contrib/Makefile -index bbf220407b0..9a82a2db046 100644 +index bbf220407b..9a82a2db04 100644 --- a/contrib/Makefile +++ b/contrib/Makefile @@ -34,6 +34,7 @@ SUBDIRS = \ @@ -11,7 +11,7 @@ index bbf220407b0..9a82a2db046 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index 594d8da2cdc..a2049e70e95 100644 +index d0e5bc26a7..5ca196518e 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index 594d8da2cdc..a2049e70e95 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index ef0f9577ab1..95858960d50 100644 +index ef0f9577ab..95858960d5 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -45,7 +45,7 @@ index ef0f9577ab1..95858960d50 100644 return state->resvalue; } diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c -index ef2fd46092e..8551733c55d 100644 +index ef2fd46092..8551733c55 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) @@ -77,10 +77,10 @@ index ef2fd46092e..8551733c55d 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 04454ad6e60..6a52e86b782 100644 +index ad0aa8dd9d..a2715efa09 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -603,6 +603,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, +@@ -663,6 +663,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, resultRelInfo->ri_projectNewInfoValid = true; } @@ -94,15 +94,15 @@ index 04454ad6e60..6a52e86b782 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3461,6 +3468,7 @@ ExecModifyTable(PlanState *pstate) - PartitionTupleRouting *proute = node->mt_partition_tuple_routing; - List *relinfos = NIL; - ListCell *lc; +@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; + ResultRelInfo *saved_resultRelInfo; CHECK_FOR_INTERRUPTS(); -@@ -3502,6 +3510,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -111,7 +111,7 @@ index 04454ad6e60..6a52e86b782 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3509,6 +3519,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -126,7 +126,7 @@ index 04454ad6e60..6a52e86b782 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -3542,7 +3560,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -137,7 +137,7 @@ index 04454ad6e60..6a52e86b782 100644 &isNull); if (isNull) { -@@ -3579,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -146,7 +146,7 @@ index 04454ad6e60..6a52e86b782 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3588,6 +3610,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -154,7 +154,7 @@ index 04454ad6e60..6a52e86b782 100644 return slot; } -@@ -3618,7 +3641,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -164,7 +164,7 @@ index 04454ad6e60..6a52e86b782 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3666,7 +3690,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -174,7 +174,7 @@ index 04454ad6e60..6a52e86b782 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -3697,9 +3722,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -190,7 +190,7 @@ index 04454ad6e60..6a52e86b782 100644 break; case CMD_UPDATE: -@@ -3707,38 +3735,46 @@ ExecModifyTable(PlanState *pstate) +@@ -3827,38 +3855,46 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -260,7 +260,7 @@ index 04454ad6e60..6a52e86b782 100644 true, false, node->canSetTag, NULL, NULL); break; -@@ -3756,7 +3792,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3876,7 +3912,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -271,7 +271,7 @@ index 04454ad6e60..6a52e86b782 100644 } /* -@@ -3785,6 +3824,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3892,6 +3931,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -279,7 +279,7 @@ index 04454ad6e60..6a52e86b782 100644 return NULL; } -@@ -3859,6 +3899,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3966,6 +4006,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -287,7 +287,7 @@ index 04454ad6e60..6a52e86b782 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -3959,6 +4000,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4066,6 +4107,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -301,8 +301,8 @@ index 04454ad6e60..6a52e86b782 100644 /* * Now we may initialize the subplan. 
*/ -@@ -4041,6 +4089,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) - } +@@ -4157,6 +4205,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); } + estate->es_result_relation_info = saved_resultRelInfo; @@ -311,7 +311,7 @@ index 04454ad6e60..6a52e86b782 100644 * If this is an inherited update/delete/merge, there will be a junk * attribute named "tableoid" present in the subplan's targetlist. It diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c -index 1a5d29ac9ba..aadca8ea474 100644 +index 1a5d29ac9b..aadca8ea47 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -25,7 +25,7 @@ @@ -324,7 +324,7 @@ index 1a5d29ac9ba..aadca8ea474 100644 volatile sig_atomic_t InterruptPending = false; volatile sig_atomic_t QueryCancelPending = false; diff --git a/src/include/access/xact.h b/src/include/access/xact.h -index 65616ca2f79..965eb544217 100644 +index 8d46a781bb..150d70cb64 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; @@ -337,7 +337,7 @@ index 65616ca2f79..965eb544217 100644 /* flag for logging statements in this transaction */ diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h -index 82925b4b633..de23622ca24 100644 +index 82925b4b63..de23622ca2 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -659,5 +659,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, @@ -359,10 +359,10 @@ index 82925b4b633..de23622ca24 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index 57288013795..ec5496afffa 100644 +index f34d06eff4..0970e5f110 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h -@@ -611,6 +611,12 @@ typedef struct EState +@@ -624,6 +624,12 @@ typedef struct EState * es_result_relations in no * specific order */ @@ -376,7 +376,7 @@ index 57288013795..ec5496afffa 100644 /* diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm -index 8de79c618cb..c9226ba5ad4 100644 +index 8de79c618c..c9226ba5ad 100644 --- a/src/tools/msvc/Install.pm +++ b/src/tools/msvc/Install.pm @@ -30,6 +30,18 @@ my @client_program_files = ( @@ -408,7 +408,7 @@ index 8de79c618cb..c9226ba5ad4 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index e4feda10fd8..74a0a0a062b 100644 +index ef0a33c10f..27033b0a45 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -39,8 +39,8 @@ my $contrib_defines = {}; @@ -422,7 +422,7 @@ index e4feda10fd8..74a0a0a062b 100644 my $contrib_extrasource = {}; my @contrib_excludes = ( 'bool_plperl', 'commit_ts', -@@ -964,6 +964,7 @@ sub AddContrib +@@ -967,6 +967,7 @@ sub AddContrib my $dn = $1; my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); $proj->AddReference($postgres); @@ -430,7 +430,7 @@ index e4feda10fd8..74a0a0a062b 100644 AdjustContribProj($proj); push @projects, $proj; } -@@ -1067,6 +1068,19 @@ sub AddContrib +@@ -1070,6 +1071,19 @@ sub AddContrib return; } @@ -450,7 +450,7 @@ index e4feda10fd8..74a0a0a062b 100644 sub GenerateContribSqlFiles { my $n = shift; -@@ -1091,23 +1105,53 @@ sub GenerateContribSqlFiles +@@ -1094,23 +1108,53 @@ sub GenerateContribSqlFiles substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1);
 	}

From 92f073473bd407311c736d03bfd074c659a21e68 Mon Sep 17 00:00:00 2001
From: Svetlana Derevyanko
Date: Fri, 27 Jan 2023 09:54:11 +0300
Subject: [PATCH 495/528] [PGPRO-7630] Post-processing for nodes added in plan
 tree by pathman

New nodes added by the pathman planner hook had no correct plan_node_id,
which could later cause problems for the statistics collector.
Added fixes for 'custom_scan_tlist' to let EXPLAIN (VERBOSE) work.
Also changed the queryId type to uint64.

Added a hook for compatibility with pgpro_stats.
Fixed tree walkers for ModifyTable.

Tags: pg_pathman
---
 src/hooks.c                     | 81 ++++++++++++++++++++++++++++++++-
 src/include/hooks.h             |  3 ++
 src/include/partition_filter.h  |  1 +
 src/include/partition_router.h  |  2 +-
 src/partition_filter.c          | 29 ++++++++++++
 src/partition_router.c          |  8 ++--
 src/pg_pathman.c                |  2 +
 src/planner_tree_modification.c | 34 +++++++-------
 8 files changed, 137 insertions(+), 23 deletions(-)

diff --git a/src/hooks.c b/src/hooks.c
index 46204d5c..65c62494 100644
--- a/src/hooks.c
+++ b/src/hooks.c
@@ -21,6 +21,7 @@
 #include "hooks.h"
 #include "init.h"
 #include "partition_filter.h"
+#include "partition_overseer.h"
 #include "partition_router.h"
 #include "pathman_workers.h"
 #include "planner_tree_modification.h"
@@ -74,6 +75,7 @@ planner_hook_type				pathman_planner_hook_next = NULL;
 post_parse_analyze_hook_type	pathman_post_parse_analyze_hook_next = NULL;
 shmem_startup_hook_type			pathman_shmem_startup_hook_next = NULL;
 ProcessUtility_hook_type		pathman_process_utility_hook_next = NULL;
+ExecutorStart_hook_type			pathman_executor_start_hook_prev = NULL;


 /* Take care of joins */
@@ -673,6 +675,23 @@ execute_for_plantree(PlannedStmt *planned_stmt,
 		planned_stmt->subplans = subplans;
 }

+/*
+ * Truncated version of set_plan_refs.
+ * pg_pathman can add nodes to an already completed and post-processed plan
+ * tree; reset_plan_node_ids renumbers the nodes of the updated plan tree
+ * to avoid problems in further processing.
+ */
+static Plan *
+reset_plan_node_ids(Plan *plan, void *lastPlanNodeId)
+{
+	if (plan == NULL)
+		return NULL;
+
+	plan->plan_node_id = (*(int *) lastPlanNodeId)++;
+
+	return plan;
+}
+
 /*
  * Planner hook.
 * It disables inheritance for tables that have been partitioned
 * by pathman to prevent the standard PostgreSQL partitioning mechanism from
@@ -688,7 +707,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams)
 #endif
 {
 	PlannedStmt	   *result;
-	uint32			query_id = parse->queryId;
+	uint64			query_id = parse->queryId;

 	/* Save the result in case it changes */
 	bool			pathman_ready = IsPathmanReady();
@@ -720,6 +739,9 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams)

 	if (pathman_ready)
 	{
+		int			lastPlanNodeId = 0;
+		ListCell   *l;
+
 		/* Add PartitionFilter node for INSERT queries */
 		execute_for_plantree(result, add_partition_filters);

@@ -729,6 +751,13 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams)
 		/* Decrement planner() calls count */
 		decr_planner_calls_count();

+		/* Renumber the plan tree nodes, since new nodes may have been added */
+		result->planTree = plan_tree_visitor(result->planTree, reset_plan_node_ids, &lastPlanNodeId);
+		foreach(l, result->subplans)
+		{
+			lfirst(l) = plan_tree_visitor((Plan *) lfirst(l), reset_plan_node_ids, &lastPlanNodeId);
+		}
+
 		/* HACK: restore queryId set by pg_stat_statements */
 		result->queryId = query_id;
 	}
@@ -1125,3 +1154,53 @@ pathman_process_utility_hook(Node *first_arg,
 									dest, completionTag);
 #endif
 }
+
+/*
+ * Planstate tree nodes could have been copied, which breaks the references
+ * from PartitionRouter nodes to the corresponding ModifyTable node.
+ */
+static void
+fix_mt_refs(PlanState *state, void *context)
+{
+	ModifyTableState	 *mt_state = (ModifyTableState *) state;
+	PartitionRouterState *pr_state;
+#if PG_VERSION_NUM < 140000
+	int					  i;
+#endif
+
+	if (!IsA(state, ModifyTableState))
+		return;
+#if PG_VERSION_NUM >= 140000
+	{
+		CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state);
+#else
+	for (i = 0; i < mt_state->mt_nplans; i++)
+	{
+		CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i];
+#endif
+		if (IsPartitionFilterState(pf_state))
+		{
+			pr_state = linitial(pf_state->custom_ps);
+			if (IsPartitionRouterState(pr_state))
+			{
+				pr_state->mt_state = mt_state;
+			}
+		}
+	}
+}
+
+void
+pathman_executor_start_hook(QueryDesc *queryDesc, int eflags)
+{
+	if (pathman_executor_start_hook_prev)
+		pathman_executor_start_hook_prev(queryDesc, eflags);
+	else
+		standard_ExecutorStart(queryDesc, eflags);
+
+	/*
+	 * HACK for compatibility with pgpro_stats.
+	 * Fix the possibly broken planstate tree.
+	 */
+	state_tree_visitor(queryDesc->planstate, fix_mt_refs, NULL);
+}
diff --git a/src/include/hooks.h b/src/include/hooks.h
index ccfe060b..813d1342 100644
--- a/src/include/hooks.h
+++ b/src/include/hooks.h
@@ -28,6 +28,7 @@ extern post_parse_analyze_hook_type	pathman_post_parse_analyze_hook_next;
 extern shmem_startup_hook_type		pathman_shmem_startup_hook_next;
 extern ProcessUtility_hook_type		pathman_process_utility_hook_next;
 extern ExecutorRun_hook_type		pathman_executor_run_hook_next;
+extern ExecutorStart_hook_type		pathman_executor_start_hook_prev;

 void pathman_join_pathlist_hook(PlannerInfo *root,
@@ -115,4 +116,6 @@ void pathman_executor_hook(QueryDesc *queryDesc,
 						   ExecutorRun_CountArgType count);
 #endif

+void pathman_executor_start_hook(QueryDesc *queryDesc,
+								 int eflags);
 #endif /* PATHMAN_HOOKS_H */
diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h
index d3c2c482..9b9f52f9 100644
--- a/src/include/partition_filter.h
+++ b/src/include/partition_filter.h
@@ -183,6 +183,7 @@ void		destroy_tuple_map(TupleConversionMap *tuple_map);

 List * pfilter_build_tlist(Plan *subplan);
+void pfilter_tlist_fix_resjunk(CustomScan *subplan);

 /* Find suitable partition using 'value' */
 Oid * find_partitions_for_value(Datum value, Oid value_type,
diff --git a/src/include/partition_router.h b/src/include/partition_router.h
index c6924609..d5684eba 100644
--- a/src/include/partition_router.h
+++ b/src/include/partition_router.h
@@ -78,7 +78,7 @@ void partition_router_explain(CustomScanState *node,
 							  List *ancestors,
 							  ExplainState *es);

-Plan *make_partition_router(Plan *subplan, int epq_param);
+Plan *make_partition_router(Plan *subplan, int epq_param, Index parent_rti);
 Node *partition_router_create_scan_state(CustomScan *node);
 TupleTableSlot *partition_router_exec(CustomScanState *node);
diff --git a/src/partition_filter.c b/src/partition_filter.c
index a267c702..78ad126b 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -817,6 +817,7 @@ make_partition_filter(Plan *subplan,
 	/* Prepare 'custom_scan_tlist' for EXPLAIN (VERBOSE) */
 	cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist);
 	ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0);
+	pfilter_tlist_fix_resjunk(cscan);

 	/* Pack partitioned table's Oid and conflict_action */
 	cscan->custom_private = list_make4(makeInteger(parent_relid),
@@ -1114,6 +1115,34 @@ pfilter_build_tlist(Plan *subplan)
 	return result_tlist;
 }

+/*
+ * resjunk Vars have their varattnos set to nonexistent relation columns.
+ * For further processing, service attributes must be marked correctly.
+ */
+void
+pfilter_tlist_fix_resjunk(CustomScan *css)
+{
+	ListCell *lc;
+
+	foreach(lc, css->custom_scan_tlist)
+	{
+		TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+		if (!IsA(tle->expr, Const))
+		{
+			Var *var = (Var *) tle->expr;
+
+			if (tle->resjunk)
+			{
+				/* Make the Var recognizable as a service attribute.
*/ + var->varattno = -1; + } + } + } + + return; +} + /* * ---------------------------------------------- * Additional init steps for ResultPartsStorage diff --git a/src/partition_router.c b/src/partition_router.c index 2e982299..bd081218 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -115,7 +115,7 @@ init_partition_router_static_data(void) } Plan * -make_partition_router(Plan *subplan, int epq_param) +make_partition_router(Plan *subplan, int epq_param, Index parent_rti) { CustomScan *cscan = makeNode(CustomScan); @@ -136,8 +136,10 @@ make_partition_router(Plan *subplan, int epq_param) /* Build an appropriate target list */ cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); - /* FIXME: should we use the same tlist? */ - cscan->custom_scan_tlist = subplan->targetlist; + /* Fix 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ + cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); + ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); + pfilter_tlist_fix_resjunk(cscan); return &cscan->scan.plan; } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 94cfce84..6e835a1f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -357,6 +357,8 @@ _PG_init(void) planner_hook = pathman_planner_hook; pathman_process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; + pathman_executor_start_hook_prev = ExecutorStart_hook; + ExecutorStart_hook = pathman_executor_start_hook; /* Initialize static data for all subsystems */ init_main_pathman_toggles(); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d9d64cfd..5b6a7982 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -122,8 +122,8 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static Plan *partition_filter_visitor(Plan *plan, void *context); static Plan *partition_router_visitor(Plan *plan, void *context); -static void state_visit_subplans(List *plans, void (*visitor) (), void *context); -static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (), void *context); +static void state_visit_subplans(List *plans, void (*visitor) (PlanState *plan, void *context), void *context); +static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (PlanState *plan, void *context), void *context); static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); @@ -137,13 +137,13 @@ static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); * id in order to recognize them properly. 
 */
 #define QUERY_ID_INITIAL 0
-static uint32 latest_query_id = QUERY_ID_INITIAL;
+static uint64 latest_query_id = QUERY_ID_INITIAL;

 void
 assign_query_id(Query *query)
 {
-	uint32	prev_id = latest_query_id++;
+	uint64	prev_id = latest_query_id++;

 	if (prev_id > latest_query_id)
 		elog(WARNING, "assign_query_id(): queryId overflow");
@@ -187,14 +187,12 @@ plan_tree_visitor(Plan *plan,
 				plan_tree_visitor((Plan *) lfirst(l), visitor, context);
 			break;

+#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */
 		case T_ModifyTable:
-#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */
-			plan_tree_visitor(outerPlan(plan), visitor, context);
-#else
 			foreach (l, ((ModifyTable *) plan)->plans)
 				plan_tree_visitor((Plan *) lfirst(l), visitor, context);
-#endif
 			break;
+#endif

 		case T_Append:
 			foreach (l, ((Append *) plan)->appendplans)
@@ -254,15 +252,13 @@ state_tree_visitor(PlanState *state,
 				state_tree_visitor((PlanState *) lfirst(lc), visitor, context);
 			break;

+#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */
 		case T_ModifyTable:
-#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */
-			visitor(outerPlanState(state), context);
-#else
 			state_visit_members(((ModifyTableState *) state)->mt_plans,
 								((ModifyTableState *) state)->mt_nplans,
 								visitor, context);
-#endif
 			break;
+#endif

 		case T_Append:
 			state_visit_members(((AppendState *) state)->appendplans,
@@ -307,7 +303,7 @@ state_tree_visitor(PlanState *state,
  */
 static void
 state_visit_subplans(List *plans,
-					 void (*visitor) (),
+					 void (*visitor) (PlanState *plan, void *context),
 					 void *context)
 {
 	ListCell *lc;
@@ -315,7 +311,7 @@ state_visit_subplans(List *plans,
 	foreach (lc, plans)
 	{
 		SubPlanState *sps = lfirst_node(SubPlanState, lc);
-		visitor(sps->planstate, context);
+		state_tree_visitor(sps->planstate, visitor, context);
 	}
 }

@@ -325,12 +321,12 @@ state_visit_subplans(List *plans,
  */
 static void
 state_visit_members(PlanState **planstates, int nplans,
-					void (*visitor) (), void *context)
+					void (*visitor) (PlanState *plan, void *context), void *context)
 {
 	int i;

 	for (i = 0; i < nplans; i++)
-		visitor(planstates[i], context);
+		state_tree_visitor(planstates[i], visitor, context);
 }


@@ -939,10 +935,12 @@ partition_router_visitor(Plan *plan, void *context)

 #if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */
 		prouter = make_partition_router(subplan,
-										modify_table->epqParam);
+										modify_table->epqParam,
+										modify_table->nominalRelation);
 #else
 		prouter = make_partition_router((Plan *) lfirst(lc1),
-										modify_table->epqParam);
+										modify_table->epqParam,
+										modify_table->nominalRelation);
 #endif

 		pfilter = make_partition_filter((Plan *) prouter, relid,

From 6bcd9d82b91baffd6b7024501e7c1837ddaffb1e Mon Sep 17 00:00:00 2001
From: Koval Dmitry
Date: Fri, 10 Mar 2023 19:58:11 +0300
Subject: [PATCH 496/528] [PGPRO-7880] Need to check relation Oid before
 locking

This must be done before locking the relation: locking an invalid Oid
causes an error on a replica.

Tags: pg_pathman
---
 expected/pathman_calamity.out   | 10 +++++-----
 expected/pathman_calamity_1.out | 10 +++++-----
 expected/pathman_calamity_2.out | 10 +++++-----
 expected/pathman_calamity_3.out | 10 +++++-----
 src/include/utils.h             |  1 +
 src/pathman_workers.c           |  2 ++
 src/pl_funcs.c                  |  6 ++++++
 src/pl_range_funcs.c            |  3 +++
 src/utils.c                     | 11 +++++++++++
 9 files changed, 43 insertions(+), 20 deletions(-)

diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out
index 7226e7b9..b9421bde 100644
--- a/expected/pathman_calamity.out
+++ b/expected/pathman_calamity.out
@@ -320,7
+320,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -426,19 +426,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? ---------- @@ -560,7 +560,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 62050cfd..6ca2e7dd 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -320,7 +320,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -426,19 +426,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT 
drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? ---------- @@ -560,7 +560,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index 5bb1053f..fa3295f6 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -320,7 +320,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -426,19 +426,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? 
---------- @@ -560,7 +560,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out index bfb3b63c..a8879ef7 100644 --- a/expected/pathman_calamity_3.out +++ b/expected/pathman_calamity_3.out @@ -324,7 +324,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -430,19 +430,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? 
----------
@@ -564,7 +564,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb);
 SELECT add_to_pathman_config(NULL, 'val');						/* no table */
 ERROR: 'parent_relid' should not be NULL
 SELECT add_to_pathman_config(0::REGCLASS, 'val');				/* no table (oid) */
-ERROR: relation "0" does not exist
+ERROR: identifier "0" must be normal Oid
 SELECT add_to_pathman_config('calamity.part_test', NULL);		/* no expr */
 ERROR: 'expression' should not be NULL
 SELECT add_to_pathman_config('calamity.part_test', 'V_A_L');	/* wrong expr */
diff --git a/src/include/utils.h b/src/include/utils.h
index 1e0b87a4..566c04db 100644
--- a/src/include/utils.h
+++ b/src/include/utils.h
@@ -84,5 +84,6 @@ Datum extract_binary_interval_from_text(Datum interval_text,
 											Oid *interval_type);
 char **deconstruct_text_array(Datum array, int *array_size);
 RangeVar **qualified_relnames_to_rangevars(char **relnames, size_t nrelnames);
+void check_relation_oid(Oid relid);

 #endif /* PATHMAN_UTILS_H */
diff --git a/src/pathman_workers.c b/src/pathman_workers.c
index eca9ee52..3eb82ab7 100644
--- a/src/pathman_workers.c
+++ b/src/pathman_workers.c
@@ -712,6 +712,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS)
 		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						errmsg("'sleep_time' should not be less than 0.5")));

+	check_relation_oid(relid);
+
 	/* Prevent concurrent function calls */
 	LockRelationOid(relid, lockmode);

diff --git a/src/pl_funcs.c b/src/pl_funcs.c
index 542f99ae..10538bea 100644
--- a/src/pl_funcs.c
+++ b/src/pl_funcs.c
@@ -673,6 +673,7 @@ validate_expression(PG_FUNCTION_ARGS)
 	if (!PG_ARGISNULL(0))
 	{
 		relid = PG_GETARG_OID(0);
+		check_relation_oid(relid);
 	}
 	else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("'relid' should not be NULL")));
@@ -807,6 +808,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS)
 	if (!PG_ARGISNULL(0))
 	{
 		relid = PG_GETARG_OID(0);
+		check_relation_oid(relid);
 	}
 	else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("'parent_relid' should not be NULL")));
@@ -1037,6 +1039,8 @@ prevent_part_modification(PG_FUNCTION_ARGS)
 {
 	Oid			relid = PG_GETARG_OID(0);

+	check_relation_oid(relid);
+
 	/* Lock partitioned relation till transaction's end */
 	LockRelationOid(relid, ShareUpdateExclusiveLock);

@@ -1051,6 +1055,8 @@ prevent_data_modification(PG_FUNCTION_ARGS)
 {
 	Oid			relid = PG_GETARG_OID(0);

+	check_relation_oid(relid);
+
 	/*
 	 * Check that isolation level is READ COMMITTED.
 	 * Else we won't be able to see new rows
diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c
index b2a8dc3d..19292a0a 100644
--- a/src/pl_range_funcs.c
+++ b/src/pl_range_funcs.c
@@ -499,6 +499,7 @@ split_range_partition(PG_FUNCTION_ARGS)
 	if (!PG_ARGISNULL(0))
 	{
 		partition1 = PG_GETARG_OID(0);
+		check_relation_oid(partition1);
 	}
 	else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("'partition1' should not be NULL")));
@@ -835,6 +836,8 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS)
 	RangeEntry *ranges;
 	int			i;

+	check_relation_oid(partition);
+
 	/* Lock the partition we're going to drop */
 	LockRelationOid(partition, AccessExclusiveLock);

diff --git a/src/utils.c b/src/utils.c
index 6ebfb8a8..9402d618 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -527,3 +527,14 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames)

 	return rangevars;
 }
+
+/*
+ * Check that the Oid is valid (this must be done before locking the relation:
+ * locking an invalid Oid causes an error on a replica).
+ */ +void +check_relation_oid(Oid relid) +{ + if (relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("identifier \"%u\" must be normal Oid", relid))); +} From 47acbe67e07bbf5841e52b705bdb03aa1adf768e Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 9 Mar 2023 21:29:56 +0300 Subject: [PATCH 497/528] [PGPRO-7870] Added error for case executing prepared query after DROP/CREATE EXTENSION Tags: pg_pathman --- expected/pathman_cache_pranks.out | 150 ++++++++++++++++++ expected/pathman_cache_pranks_1.out | 237 ++++++++++++++++++++++++++++ sql/pathman_cache_pranks.sql | 69 ++++++++ src/nodes_common.c | 15 +- 4 files changed, 469 insertions(+), 2 deletions(-) create mode 100644 expected/pathman_cache_pranks_1.out diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out index 5493ae96..278643ff 100644 --- a/expected/pathman_cache_pranks.out +++ b/expected/pathman_cache_pranks.out @@ -76,5 +76,155 @@ ERROR: can't partition table "part_test" with existing children DROP TABLE part_test CASCADE; NOTICE: drop cascades to 302 other objects -- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. +-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + 
+EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; + DROP EXTENSION pg_pathman; + CREATE EXTENSION pg_pathman; + COMMIT; +COMMIT; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects -- finalize DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cache_pranks_1.out b/expected/pathman_cache_pranks_1.out new file mode 100644 index 00000000..4a3982a6 --- /dev/null +++ b/expected/pathman_cache_pranks_1.out @@ -0,0 +1,237 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? +SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table "part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 5 other objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared 
query after DROP/CREATE EXTENSION. +-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; +ERROR: syntax error at or near "AUTONOMOUS" at character 7 + DROP EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + CREATE EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + COMMIT; +COMMIT; +WARNING: there is no transaction in progress +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects +-- finalize +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql index 782ef7f0..e3fe00d9 100644 --- a/sql/pathman_cache_pranks.sql +++ b/sql/pathman_cache_pranks.sql @@ -48,6 +48,75 @@ SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]: DROP TABLE part_test CASCADE; -- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. 
+--
+-- DROP/CREATE extension
+CREATE TABLE part_test(a INT4 NOT NULL, b INT4);
+PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]);
+SELECT create_range_partitions('part_test', 'a', 1, 100, 10);
+
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+
+DROP EXTENSION pg_pathman;
+CREATE EXTENSION pg_pathman;
+
+EXECUTE q(1);
+
+DEALLOCATE q;
+DROP TABLE part_test CASCADE;
+
+-- DROP/CREATE disabled extension
+CREATE TABLE part_test(a INT4 NOT NULL, b INT4);
+PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]);
+SELECT create_range_partitions('part_test', 'a', 1, 100, 10);
+
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+
+SET pg_pathman.enable = f;
+DROP EXTENSION pg_pathman;
+CREATE EXTENSION pg_pathman;
+SET pg_pathman.enable = t;
+
+EXECUTE q(1);
+
+DEALLOCATE q;
+DROP TABLE part_test CASCADE;
+
+-- DROP/CREATE extension in autonomous transaction
+CREATE TABLE part_test(a INT4 NOT NULL, b INT4);
+PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]);
+SELECT create_range_partitions('part_test', 'a', 1, 100, 2);
+
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+EXECUTE q(1);
+
+BEGIN;
+	BEGIN AUTONOMOUS;
+	DROP EXTENSION pg_pathman;
+	CREATE EXTENSION pg_pathman;
+	COMMIT;
+COMMIT;
+
+EXECUTE q(1);
+
+DEALLOCATE q;
+DROP TABLE part_test CASCADE;
 -- finalize
 DROP EXTENSION pg_pathman;
diff --git a/src/nodes_common.c b/src/nodes_common.c
index a6fecb51..f4ebc6b1 100644
--- a/src/nodes_common.c
+++ b/src/nodes_common.c
@@ -601,7 +601,10 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel,
 	CustomScan *cscan;

 	prel = get_pathman_relation_info(rpath->relid);
-	Assert(prel);
+	if (!prel)
+		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+						errmsg("table \"%s\" is not partitioned",
+							   get_rel_name_or_relid(rpath->relid))));

 	cscan = makeNode(CustomScan);
 	cscan->custom_scan_tlist = NIL;		/* initial value (empty list) */
@@ -709,7 +712,15 @@ begin_append_common(CustomScanState *node, EState *estate, int eflags)
 #endif

 	scan_state->prel = get_pathman_relation_info(scan_state->relid);
-	Assert(scan_state->prel);
+	/*
+	 * scan_state->prel can be NULL when we are executing a prepared query
+	 * that was prepared before DROP/CREATE EXTENSION pg_pathman, after
+	 * truncation of the pathman_config table, etc.
+ */ + if (!scan_state->prel) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(scan_state->relid)))); /* Prepare expression according to set_set_customscan_references() */ scan_state->prel_expr = PrelExpressionForRelid(scan_state->prel, INDEX_VAR); From c4c0e34a6cc74cb8c455c1f26582883457e16630 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 22 Mar 2023 10:06:36 +0300 Subject: [PATCH 498/528] [PGPRO-7928] Variable pg_pathman.enable must be called before any query Tags: pg_pathman --- expected/pathman_runtime_nodes.out | 41 ++++++++++++++++++++++++---- expected/pathman_runtime_nodes_1.out | 41 ++++++++++++++++++++++++---- sql/pathman_runtime_nodes.sql | 32 ++++++++++++++++++---- src/hooks.c | 24 +++++++++++++++- src/include/hooks.h | 1 + src/init.c | 2 +- 6 files changed, 123 insertions(+), 18 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 17905e59..5d3b5638 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -58,7 +58,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_2() returns text as $$ @@ -100,7 +99,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_3() returns text as $$ @@ -133,7 +131,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_4() returns text as $$ @@ -172,7 +169,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_5() returns text as $$ @@ -233,7 +229,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; create table test.run_values as select generate_series(1, 10000) val; @@ -464,5 +459,41 @@ DROP FUNCTION test.pathman_test_3(); DROP FUNCTION test.pathman_test_4(); DROP FUNCTION test.pathman_test_5(); DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +ERROR: relation "part_test_1" does not exist +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 2 | part_test +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 3 | part_test +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out index 65382269..10435240 100644 --- a/expected/pathman_runtime_nodes_1.out +++ b/expected/pathman_runtime_nodes_1.out @@ -58,7 +58,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_2() returns text as $$ @@ -100,7 +99,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_3() returns text as $$ @@ -133,7 +131,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_4() returns text as $$ @@ -172,7 +169,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_5() returns text as $$ @@ -233,7 +229,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; create table test.run_values as select generate_series(1, 10000) val; @@ -464,5 +459,41 @@ DROP FUNCTION test.pathman_test_3(); DROP FUNCTION test.pathman_test_4(); DROP FUNCTION test.pathman_test_5(); DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +ERROR: relation "part_test_1" does not exist +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 2 | part_test +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 3 | part_test +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 81c046db..9fa7028f 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -63,7 +63,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -106,7 +105,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -140,7 +138,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -180,7 +177,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -242,7 +238,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; @@ -347,6 +342,31 @@ DROP FUNCTION test.pathman_test_3(); DROP FUNCTION test.pathman_test_4(); DROP FUNCTION test.pathman_test_5(); DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION part_test_trigger(); + DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; - diff --git a/src/hooks.c b/src/hooks.c index 65c62494..b4ae796a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -39,8 +39,9 @@ #include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "rewrite/rewriteManip.h" -#include "utils/typcache.h" #include "utils/lsyscache.h" +#include "utils/typcache.h" +#include "utils/snapmgr.h" #ifdef USE_ASSERT_CHECKING @@ -614,6 +615,27 @@ pathman_rel_pathlist_hook(PlannerInfo *root, close_pathman_relation_info(prel); } +/* + * 'pg_pathman.enable' GUC check. + */ +bool +pathman_enable_check_hook(bool *newval, void **extra, GucSource source) +{ + if (FirstSnapshotSet || + GetTopTransactionIdIfAny() != InvalidTransactionId || +#ifdef PGPRO_EE + getNestLevelATX() > 0 || +#endif + IsSubTransaction()) + { + GUC_check_errcode(ERRCODE_ACTIVE_SQL_TRANSACTION); + GUC_check_errmsg("\"pg_pathman.enable\" must be called before any query"); + return false; + } + + return true; +} + /* * Intercept 'pg_pathman.enable' GUC assignments. */ diff --git a/src/include/hooks.h b/src/include/hooks.h index 813d1342..4d426f5a 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -44,6 +44,7 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, RangeTblEntry *rte); void pathman_enable_assign_hook(bool newval, void *extra); +bool pathman_enable_check_hook(bool *newval, void **extra, GucSource source); PlannedStmt * pathman_planner_hook(Query *parse, #if PG_VERSION_NUM >= 130000 diff --git a/src/init.c b/src/init.c index bdec28fd..4341d406 100644 --- a/src/init.c +++ b/src/init.c @@ -166,7 +166,7 @@ init_main_pathman_toggles(void) DEFAULT_PATHMAN_ENABLE, PGC_SUSET, 0, - NULL, + pathman_enable_check_hook, pathman_enable_assign_hook, NULL); From 2bb067d44ea8b54ba0e3c0ac17af07cf334941ac Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 18 Apr 2023 14:41:11 +0300 Subject: [PATCH 499/528] [PGPRO-8041] Fixed restrictions for pg_pathman.enable Tags: pg_pathman --- expected/pathman_runtime_nodes.out | 24 +++++++++++++++--------- expected/pathman_runtime_nodes_1.out | 24 +++++++++++++++--------- sql/pathman_runtime_nodes.sql | 2 +- src/hooks.c | 19 ++++++++++++++++--- 4 files changed, 47 insertions(+), 22 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 5d3b5638..ab8a7e02 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -465,8 +465,12 @@ DROP SCHEMA test; -- Variable pg_pathman.enable must be called before any query. 
-- CREATE TABLE part_test (val int NOT NULL); -SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); -ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ BEGIN RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); @@ -478,22 +482,24 @@ END; $$ LANGUAGE PLPGSQL; SET pg_pathman.enable_partitionrouter = t; CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); -ERROR: relation "part_test_1" does not exist INSERT INTO part_test VALUES (1); UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 2 | part_test + val | tableoid +-----+------------- + 2 | part_test_1 (1 row) UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 3 | part_test +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. + val | tableoid +-----+------------ + 3 | pg_pathman (1 row) RESET pg_pathman.enable_partitionrouter; DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out index 10435240..ef928861 100644 --- a/expected/pathman_runtime_nodes_1.out +++ b/expected/pathman_runtime_nodes_1.out @@ -465,8 +465,12 @@ DROP SCHEMA test; -- Variable pg_pathman.enable must be called before any query. -- CREATE TABLE part_test (val int NOT NULL); -SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); -ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ BEGIN RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); @@ -478,22 +482,24 @@ END; $$ LANGUAGE PLPGSQL; SET pg_pathman.enable_partitionrouter = t; CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); -ERROR: relation "part_test_1" does not exist INSERT INTO part_test VALUES (1); UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 2 | part_test + val | tableoid +-----+------------- + 2 | part_test_1 (1 row) UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 3 | part_test +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. 
+ val | tableoid +-----+------------ + 3 | pg_pathman (1 row) RESET pg_pathman.enable_partitionrouter; DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 9fa7028f..bf917d88 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -348,7 +348,7 @@ DROP SCHEMA test; -- Variable pg_pathman.enable must be called before any query. -- CREATE TABLE part_test (val int NOT NULL); -SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ BEGIN RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); diff --git a/src/hooks.c b/src/hooks.c index b4ae796a..89d2074e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -621,6 +621,15 @@ pathman_rel_pathlist_hook(PlannerInfo *root, bool pathman_enable_check_hook(bool *newval, void **extra, GucSource source) { + /* The top level statement requires immediate commit: accept GUC change */ + if (MyXactFlags & XACT_FLAGS_NEEDIMMEDIATECOMMIT) + return true; + + /* Ignore the case of re-setting the same value */ + if (*newval == pathman_init_state.pg_pathman_enable) + return true; + + /* Command must be at top level of a fresh transaction. */ if (FirstSnapshotSet || GetTopTransactionIdIfAny() != InvalidTransactionId || #ifdef PGPRO_EE @@ -628,9 +637,13 @@ pathman_enable_check_hook(bool *newval, void **extra, GucSource source) #endif IsSubTransaction()) { - GUC_check_errcode(ERRCODE_ACTIVE_SQL_TRANSACTION); - GUC_check_errmsg("\"pg_pathman.enable\" must be called before any query"); - return false; + /* Keep the old value. */ + *newval = pathman_init_state.pg_pathman_enable; + + ereport(WARNING, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("\"SET pg_pathman.enable\" must be called before any query. " + "Command ignored."))); } return true; From e568aa64afbdc1cdb0191492fef1f7361b34769a Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 19 Apr 2023 13:38:49 +0300 Subject: [PATCH 500/528] [PGPRO-8041] Corrected warning message; moved line with assignment Tags: pg_pathman --- expected/pathman_runtime_nodes.out | 2 +- expected/pathman_runtime_nodes_1.out | 2 +- src/hooks.c | 9 ++++----- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index ab8a7e02..f699ddeb 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -491,7 +491,7 @@ UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; NOTICE: AFTER DELETE ROW (part_test_1) -WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. 
+WARNING: "pg_pathman.enable" must be called before any query, ignored val | tableoid -----+------------ 3 | pg_pathman diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out index ef928861..e975c761 100644 --- a/expected/pathman_runtime_nodes_1.out +++ b/expected/pathman_runtime_nodes_1.out @@ -491,7 +491,7 @@ UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; NOTICE: AFTER DELETE ROW (part_test_1) -WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. +WARNING: "pg_pathman.enable" must be called before any query, ignored val | tableoid -----+------------ 3 | pg_pathman diff --git a/src/hooks.c b/src/hooks.c index 89d2074e..437c89a6 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -637,13 +637,12 @@ pathman_enable_check_hook(bool *newval, void **extra, GucSource source) #endif IsSubTransaction()) { - /* Keep the old value. */ - *newval = pathman_init_state.pg_pathman_enable; - ereport(WARNING, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("\"SET pg_pathman.enable\" must be called before any query. " - "Command ignored."))); + errmsg("\"pg_pathman.enable\" must be called before any query, ignored"))); + + /* Keep the old value. */ + *newval = pathman_init_state.pg_pathman_enable; } return true; From 6ef2ea0dcc8d06e0a28571d83efb522bfa414fe6 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 28 Mar 2023 21:57:35 +0300 Subject: [PATCH 501/528] [PGPRO-7963] Fix for REL_14_STABLE/REL_15_STABLE diffs Tags: pg_pathman --- patches/REL_14_STABLE-pg_pathman-core.diff | 89 +++++++------------- patches/REL_15_STABLE-pg_pathman-core.diff | 96 +++++++--------------- 2 files changed, 59 insertions(+), 126 deletions(-) diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index 57576c44..af130c15 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -24,7 +24,7 @@ index bf551b0395..10d2044ae6 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index 6b63f93e6d..060146d127 100644 +index bdf59a10fc..972453d9a5 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -77,7 +77,7 @@ index b3ce4bae53..8f2bb12542 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 0780554246..a90f3a495d 100644 +index 55c430c9ec..21d9e6304a 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -510,7 +510,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, @@ -89,7 +89,7 @@ index 0780554246..a90f3a495d 100644 ExecInitUpdateProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo) { -@@ -2487,6 +2487,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2486,6 +2486,7 @@ ExecModifyTable(PlanState *pstate) ItemPointerData tuple_ctid; HeapTupleData oldtupdata; HeapTuple oldtuple; @@ -97,7 +97,7 @@ index 0780554246..a90f3a495d 100644 CHECK_FOR_INTERRUPTS(); -@@ -2524,12 +2525,23 @@ ExecModifyTable(PlanState *pstate) +@@ -2523,12 +2524,23 @@ ExecModifyTable(PlanState *pstate) resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; subplanstate = outerPlanState(node); @@ -121,7 +121,7 @@ index 0780554246..a90f3a495d 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly -@@ -2563,7 +2575,9 @@ ExecModifyTable(PlanState *pstate) +@@ -2562,7 +2574,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -132,7 +132,7 @@ index 0780554246..a90f3a495d 100644 &isNull); if (isNull) elog(ERROR, "tableoid is NULL"); -@@ -2582,6 +2596,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2581,6 +2595,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -141,7 +141,7 @@ index 0780554246..a90f3a495d 100644 /* * A scan slot containing the data that was actually inserted, -@@ -2591,6 +2607,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2590,6 +2606,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); @@ -149,7 +149,7 @@ index 0780554246..a90f3a495d 100644 return slot; } -@@ -2620,7 +2637,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2619,7 +2636,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -159,7 +159,7 @@ index 0780554246..a90f3a495d 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -2650,7 +2668,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2649,7 +2667,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -169,7 +169,7 @@ index 0780554246..a90f3a495d 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -2681,8 +2700,12 @@ ExecModifyTable(PlanState *pstate) +@@ -2680,8 +2699,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -184,58 +184,25 @@ index 0780554246..a90f3a495d 100644 estate, node->canSetTag); break; case CMD_UPDATE: -@@ -2690,37 +2713,45 @@ ExecModifyTable(PlanState *pstate) +@@ -2689,6 +2712,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); -- /* -- * Make the new tuple by combining plan's output tuple with -- * the old tuple being updated. -- */ -- oldSlot = resultRelInfo->ri_oldTupleSlot; -- if (oldtuple != NULL) -- { -- /* Use the wholerow junk attr as the old tuple. 
*/ -- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -- } -- else ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ + /* Do nothing in case tuple was modified in pg_pathman: */ + if (!estate->es_original_tuple) - { -- /* Fetch the most recent version of old tuple. */ -- Relation relation = resultRelInfo->ri_RelationDesc; -- -- Assert(tupleid != NULL); -- if (!table_tuple_fetch_row_version(relation, tupleid, -- SnapshotAny, -- oldSlot)) -- elog(ERROR, "failed to fetch tuple being updated"); -+ /* -+ * Make the new tuple by combining plan's output tuple -+ * with the old tuple being updated. -+ */ -+ oldSlot = resultRelInfo->ri_oldTupleSlot; -+ if (oldtuple != NULL) -+ { -+ /* Use the wholerow junk attr as the old tuple. */ -+ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -+ } -+ else -+ { -+ /* Fetch the most recent version of old tuple. */ -+ Relation relation = resultRelInfo->ri_RelationDesc; -+ -+ Assert(tupleid != NULL); -+ if (!table_tuple_fetch_row_version(relation, tupleid, -+ SnapshotAny, -+ oldSlot)) -+ elog(ERROR, "failed to fetch tuple being updated"); -+ } -+ slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, -+ oldSlot); ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -2712,14 +2742,19 @@ ExecModifyTable(PlanState *pstate) } -- slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, -- oldSlot); + slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, + oldSlot); ++ } /* Now apply the update. */ - slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot, @@ -253,7 +220,7 @@ index 0780554246..a90f3a495d 100644 planSlot, &node->mt_epqstate, estate, true, /* processReturning */ node->canSetTag, -@@ -2737,7 +2768,10 @@ ExecModifyTable(PlanState *pstate) +@@ -2736,7 +2771,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -264,7 +231,7 @@ index 0780554246..a90f3a495d 100644 } /* -@@ -2753,6 +2787,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2752,6 +2790,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -272,7 +239,7 @@ index 0780554246..a90f3a495d 100644 return NULL; } -@@ -2827,6 +2862,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2826,6 +2865,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -280,7 +247,7 @@ index 0780554246..a90f3a495d 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -2923,6 +2959,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2922,6 +2962,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -294,7 +261,7 @@ index 0780554246..a90f3a495d 100644 /* * Now we may initialize the subplan. 
*/ -@@ -3004,6 +3047,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3002,6 +3049,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecInitStoredGenerated(resultRelInfo, estate, operation); } diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index 3d72d2e7..04fae9aa 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -24,7 +24,7 @@ index d0e5bc26a7..5ca196518e 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index ef0f9577ab..95858960d5 100644 +index d5e46098c2..d3c02c1def 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -77,10 +77,10 @@ index ef2fd46092..8551733c55 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index ad0aa8dd9d..a2715efa09 100644 +index 2f6e66b641..d4a1e48c20 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -663,6 +663,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, +@@ -641,6 +641,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, resultRelInfo->ri_projectNewInfoValid = true; } @@ -94,7 +94,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3524,6 +3531,7 @@ ExecModifyTable(PlanState *pstate) HeapTupleData oldtupdata; HeapTuple oldtuple; ItemPointer tupleid; @@ -102,7 +102,7 @@ index ad0aa8dd9d..a2715efa09 100644 CHECK_FOR_INTERRUPTS(); -@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3565,6 +3573,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -111,7 +111,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3572,6 +3582,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -126,7 +126,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3605,7 +3623,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -137,7 +137,7 @@ index ad0aa8dd9d..a2715efa09 100644 &isNull); if (isNull) { -@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3642,6 +3662,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -146,7 +146,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3651,6 +3673,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -154,7 +154,7 @@ index ad0aa8dd9d..a2715efa09 100644 return slot; } -@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3681,7 +3704,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -164,7 +164,7 @@ index ad0aa8dd9d..a2715efa09 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3729,7 +3753,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -174,7 +174,7 @@ index ad0aa8dd9d..a2715efa09 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3760,9 +3785,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -190,59 +190,25 @@ index ad0aa8dd9d..a2715efa09 100644 break; case CMD_UPDATE: -@@ -3827,38 +3855,46 @@ ExecModifyTable(PlanState *pstate) +@@ -3770,6 +3798,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); -- /* -- * Make the new tuple by combining plan's output tuple with -- * the old tuple being updated. -- */ -- oldSlot = resultRelInfo->ri_oldTupleSlot; -- if (oldtuple != NULL) -- { -- /* Use the wholerow junk attr as the old tuple. */ -- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -- } -- else ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ + /* Do nothing in case tuple was modified in pg_pathman: */ + if (!estate->es_original_tuple) - { -- /* Fetch the most recent version of old tuple. */ -- Relation relation = resultRelInfo->ri_RelationDesc; -+ /* -+ * Make the new tuple by combining plan's output tuple -+ * with the old tuple being updated. -+ */ -+ oldSlot = resultRelInfo->ri_oldTupleSlot; -+ if (oldtuple != NULL) -+ { -+ /* Use the wholerow junk attr as the old tuple. */ -+ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -+ } -+ else -+ { -+ /* Fetch the most recent version of old tuple. 
*/ -+ Relation relation = resultRelInfo->ri_RelationDesc; - -- if (!table_tuple_fetch_row_version(relation, tupleid, -- SnapshotAny, -- oldSlot)) -- elog(ERROR, "failed to fetch tuple being updated"); -+ if (!table_tuple_fetch_row_version(relation, tupleid, -+ SnapshotAny, -+ oldSlot)) -+ elog(ERROR, "failed to fetch tuple being updated"); -+ } -+ slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, -+ oldSlot, NULL); -+ context.GetUpdateNewTuple = internalGetUpdateNewTuple; -+ context.relaction = NULL; - } -- slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, -- oldSlot, NULL); -- context.GetUpdateNewTuple = internalGetUpdateNewTuple; -- context.relaction = NULL; ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3793,14 +3828,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } /* Now apply the update. */ - slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, @@ -260,7 +226,7 @@ index ad0aa8dd9d..a2715efa09 100644 true, false, node->canSetTag, NULL, NULL); break; -@@ -3876,7 +3912,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3818,7 +3858,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -271,7 +237,7 @@ index ad0aa8dd9d..a2715efa09 100644 } /* -@@ -3892,6 +3931,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3834,6 +3877,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -279,7 +245,7 @@ index ad0aa8dd9d..a2715efa09 100644 return NULL; } -@@ -3966,6 +4006,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3908,6 +3952,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -287,7 +253,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -4066,6 +4107,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4008,6 +4053,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -301,7 +267,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * Now we may initialize the subplan. 
*/ -@@ -4157,6 +4205,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4102,6 +4154,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecInitStoredGenerated(resultRelInfo, estate, operation); } From 084e2645a8ccd85feeaf860935a971381d8c84f7 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 7 Apr 2023 15:24:20 +0300 Subject: [PATCH 502/528] [PGPRO-7928] Fix for REL_14_STABLE diff Tags: pg_pathman --- patches/REL_14_STABLE-pg_pathman-core.diff | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index af130c15..a6ac1afa 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -353,6 +353,19 @@ index ee5ad3c058..dc474819d7 100644 PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ /* +diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h +index 33e6c14e81..abd9bba23e 100644 +--- a/src/include/utils/snapmgr.h ++++ b/src/include/utils/snapmgr.h +@@ -53,7 +53,7 @@ extern TimestampTz GetSnapshotCurrentTimestamp(void); + extern TimestampTz GetOldSnapshotThresholdTimestamp(void); + extern void SnapshotTooOldMagicForTest(void); + +-extern bool FirstSnapshotSet; ++extern PGDLLIMPORT bool FirstSnapshotSet; + + extern PGDLLIMPORT TransactionId TransactionXmin; + extern PGDLLIMPORT TransactionId RecentXmin; diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm index de22c9ba2c..c8be5323b8 100644 --- a/src/tools/msvc/Install.pm From b1f19b7e331ee0d9d6c1866b301cc30fce856ad0 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 1 Jun 2023 21:39:56 +0300 Subject: [PATCH 503/528] Travis-CI: added clang15 for PostgreSQL v11-v13 and removed v10 --- .travis.yml | 6 ------ Dockerfile.tmpl | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index dd63d98f..81a40e18 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,9 +30,3 @@ env: - PG_VERSION=12 - PG_VERSION=11 LEVEL=hardcore - PG_VERSION=11 - - PG_VERSION=10 LEVEL=hardcore - - PG_VERSION=10 - -jobs: - allow_failures: - - env: PG_VERSION=10 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 0a25ad14..309719de 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -9,7 +9,7 @@ RUN apk add --no-cache \ coreutils linux-headers \ make musl-dev gcc bison flex \ zlib-dev libedit-dev \ - clang clang-analyzer; + clang clang15 clang-analyzer; # Install fresh valgrind RUN apk add valgrind \ From d752a8071e79ac54ad21d8df90ad38e662a17921 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Thu, 1 Jun 2023 01:32:59 +0300 Subject: [PATCH 504/528] PGPRO-8238, PGPRO-8122: Fix build with master at 5df319f3d. Correct number of args in ExecInitRangeTable(), ExecInsertIndexTuples(), ExecBRUpdateTriggers() and ExecBRDeleteTriggers(). Caused by: - b803b7d132e3505ab77c29acf91f3d1caa298f95 Fill EState.es_rteperminfos more systematically. - 19d8e2308bc51ec4ab993ce90077342c915dd116 Ignore BRIN indexes when checking for HOT updates - 9321c79c86e6a6a4eac22e2235a21a8b68388723 Fix concurrent update issues with MERGE. 
Tags: pg_pathman --- src/include/compat/pg_compat.h | 26 ++++++++++++++++++++------ src/utility_stmt_hooking.c | 8 ++++++-- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 4ae249e6..bc9323ae 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -779,7 +779,12 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecBRUpdateTriggers() */ -#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#if PG_VERSION_NUM >= 160000 +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL, NULL) +#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ #define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ tupleid, fdw_trigtuple, newslot) \ ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ @@ -809,7 +814,12 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecBRDeleteTriggers() */ -#if PG_VERSION_NUM >= 110000 +#if PG_VERSION_NUM >= 160000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot), NULL, NULL) +#elif PG_VERSION_NUM >= 110000 #define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ fdw_trigtuple, epqslot) \ ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ @@ -1028,15 +1038,19 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecInsertIndexTuples. Since 12 slot contains tupleid. * Since 14: new fields "resultRelInfo", "update". + * Since 16: new bool field "onlySummarizing". 
*/ -#if PG_VERSION_NUM >= 140000 -#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ +#if PG_VERSION_NUM >= 160000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes), (onlySummarizing)) +#elif PG_VERSION_NUM >= 140000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes)) #elif PG_VERSION_NUM >= 120000 -#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) #else -#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) #endif diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index d1d9010c..704387d8 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -564,10 +564,14 @@ PathmanCopyFrom( #if PG_VERSION_NUM >= 140000 /* reworked in 1375422c7826 */ /* - * Call ExecInitRangeTable() should be first because in 14 it initializes + * Call ExecInitRangeTable() should be first because in 14+ it initializes * field "estate->es_result_relations": */ +#if PG_VERSION_NUM >= 160000 + ExecInitRangeTable(estate, range_table, cstate->rteperminfos); +#else ExecInitRangeTable(estate, range_table); +#endif estate->es_result_relations = (ResultRelInfo **) palloc0(list_length(range_table) * sizeof(ResultRelInfo *)); estate->es_result_relations[0] = parent_rri; @@ -749,7 +753,7 @@ PathmanCopyFrom( /* ... and create index entries for it */ if (child_rri->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuplesCompat(estate->es_result_relation_info, - slot, &(tuple->t_self), estate, false, false, NULL, NIL); + slot, &(tuple->t_self), estate, false, false, NULL, NIL, false); } #ifdef PG_SHARDMAN /* Handle foreign tables */ From 4f9a6024ee6e6fd99de44e8f254469db6564f599 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Thu, 22 Jun 2023 11:17:15 +0300 Subject: [PATCH 505/528] PGPRO-8166: Fix build with vanilla at db93e739ac. 
Tags: pg_pathman --- expected/pathman_column_type_2.out | 203 +++++++++++++++++++++++++++++ expected/pathman_join_clause_5.out | 160 +++++++++++++++++++++++ src/hooks.c | 8 +- src/include/compat/pg_compat.h | 17 ++- 4 files changed, 383 insertions(+), 5 deletions(-) create mode 100644 expected/pathman_column_type_2.out create mode 100644 expected/pathman_join_clause_5.out diff --git a/expected/pathman_column_type_2.out b/expected/pathman_column_type_2.out new file mode 100644 index 00000000..0fbd0793 --- /dev/null +++ b/expected/pathman_column_type_2.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* + * Get parsed and analyzed expression. + */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key 
+----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. + */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + 
test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause_5.out b/expected/pathman_join_clause_5.out new file mode 100644 index 00000000..179f50f7 --- /dev/null +++ b/expected/pathman_join_clause_5.out @@ -0,0 +1,160 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT 
NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/src/hooks.c b/src/hooks.c index 437c89a6..2ff2667c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -449,12 +449,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->lt_opr, NULL, false); + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->lt_opr, NULL, false); if (pathkeys) pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->gt_opr, NULL, false); + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->gt_opr, NULL, false); if (pathkeys) pathkeyDesc = (PathKey *) linitial(pathkeys); } diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 
bc9323ae..e75ab1c4 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -1208,13 +1208,18 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); /* * make_restrictinfo() + * In >=16 3th and 9th arguments were removed (b448f1c8d83) * In >=14 new argument was added (55dc86eca70) */ +#if PG_VERSION_NUM >= 160000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (p), (sl), (rr), (or)) +#else #if PG_VERSION_NUM >= 140000 #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) #else #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), (ipd), (od), (p), (sl), (rr), (or), (nr)) -#endif +#endif /* #if PG_VERSION_NUM >= 140000 */ +#endif /* #if PG_VERSION_NUM >= 160000 */ /* * pull_varnos() @@ -1226,4 +1231,14 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define pull_varnos_compat(r, n) pull_varnos(n) #endif +/* + * build_expression_pathkey() + * In >=16 argument was removed (b448f1c8d83) + */ +#if PG_VERSION_NUM >= 160000 +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, opno, rel, create_it) +#else +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, nullable_relids, opno, rel, create_it) +#endif + #endif /* PG_COMPAT_H */ From ac5f05a5b4ae9a97cc9c1517bbd2ba26309209b0 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 28 Jun 2023 00:17:27 +0300 Subject: [PATCH 506/528] [PGPRO-8370] Fix build with vanilla at f5c446e336 Tags: pg_pathman --- src/include/compat/pg_compat.h | 13 ++++++++++++- src/partition_filter.c | 4 ++-- src/partition_router.c | 6 +++--- src/utility_stmt_hooking.c | 8 ++++++++ 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index e75ab1c4..5a12b528 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -1208,11 +1208,12 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); /* * make_restrictinfo() + * In >=16 4th, 5th and 9th arguments were added (991a3df227e) * In >=16 3th and 9th arguments were removed (b448f1c8d83) * In >=14 new argument was added (55dc86eca70) */ #if PG_VERSION_NUM >= 160000 -#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (p), (sl), (rr), (or)) +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), false, false, (p), (sl), (rr), NULL, (or)) #else #if PG_VERSION_NUM >= 140000 #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) @@ -1241,4 +1242,14 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, nullable_relids, opno, rel, create_it) #endif +/* + * EvalPlanQualInit() + * In >=16 argument was added (70b42f27902) + */ +#if PG_VERSION_NUM >= 160000 +#define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam, NIL) +#else +#define EvalPlanQualInit_compat(epqstate, 
parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam) +#endif + #endif /* PG_COMPAT_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 78ad126b..d4cf8308 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -345,8 +345,8 @@ scan_result_parts_storage(EState *estate, ResultPartsStorage *parts_storage, child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, translated_vars); - /* Check permissions for partition */ - ExecCheckPermissions(list_make1(child_rte), list_make1(child_perminfo), true); + /* Check permissions for one partition */ + ExecCheckOneRtePermissions(child_rte, child_perminfo, true); #else /* Build Var translation list for 'inserted_cols' */ make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); diff --git a/src/partition_router.c b/src/partition_router.c index bd081218..4a597a13 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -170,9 +170,9 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) /* Remember current relation we're going to delete from */ state->current_rri = estate->es_result_relation_info; - EvalPlanQualInit(&state->epqstate, estate, - state->subplan, NIL, - state->epqparam); + EvalPlanQualInit_compat(&state->epqstate, estate, + state->subplan, NIL, + state->epqparam); /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 704387d8..83bfa680 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -517,6 +517,14 @@ PathmanDoCopy(const CopyStmt *stmt, } else { +#if PG_VERSION_NUM >= 160000 /* for commit f75cec4fff87 */ + /* + * Forget current RangeTblEntries and RTEPermissionInfos. + * Standard DoCopy will create new ones. + */ + pstate->p_rtable = NULL; + pstate->p_rteperminfos = NULL; +#endif /* Call standard DoCopy using a new CopyStmt */ DoCopyCompat(pstate, stmt, stmt_location, stmt_len, processed); } From 144d954c9dcc6a296926572398d460f44f49f482 Mon Sep 17 00:00:00 2001 From: Maxim Orlov Date: Mon, 26 Jun 2023 12:37:31 +0300 Subject: [PATCH 507/528] Fix parallel installcheck-world Create unique db objects in order not to mess with other tests. 
tags: pg_pathman --- expected/pathman_CVE-2020-14350.out | 22 +-- expected/pathman_permissions.out | 204 ++++++++++++++-------------- expected/pathman_permissions_1.out | 204 ++++++++++++++-------------- sql/pathman_CVE-2020-14350.sql | 22 +-- sql/pathman_permissions.sql | 110 +++++++-------- 5 files changed, 281 insertions(+), 281 deletions(-) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out index c4250097..a48e182f 100644 --- a/expected/pathman_CVE-2020-14350.out +++ b/expected/pathman_CVE-2020-14350.out @@ -7,15 +7,15 @@ DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; -DROP ROLE IF EXISTS regress_hacker; +DROP ROLE IF EXISTS pathman_regress_hacker; SET client_min_messages = 'notice'; GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; -CREATE ROLE regress_hacker LOGIN; +CREATE ROLE pathman_regress_hacker LOGIN; -- Test 1 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; SHOW is_superuser; is_superuser -------------- @@ -26,7 +26,7 @@ CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_tota RETURNS bigint AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; END $$ LANGUAGE plpgsql; @@ -53,7 +53,7 @@ SELECT pg_sleep(1); (1 row) -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; is_superuser -------------- @@ -62,8 +62,8 @@ SHOW is_superuser; -- Test 2 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; SHOW is_superuser; is_superuser -------------- @@ -74,7 +74,7 @@ CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANY RETURNS REGCLASS AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); END $$ LANGUAGE plpgsql; @@ -89,7 +89,7 @@ SELECT create_range_partitions('test2', 'i', 0, 1); INSERT INTO test2 values(1); -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; is_superuser -------------- @@ -112,5 +112,5 @@ NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to sequence test2_seq drop cascades to table test2_1 drop cascades to table test2_2 -DROP ROLE regress_hacker; +DROP ROLE pathman_regress_hacker; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 04b1112d..a29865d0 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -2,107 +2,107 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; -CREATE ROLE user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE 
user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges -/* Grant SELECT to user2 */ -SET ROLE user1; -GRANT SELECT ON permissions.user1_table TO user2; +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); create_range_partitions ------------------------- 2 (1 row) /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; - partrel | expr | parttype | range_interval --------------------------+------+----------+---------------- - permissions.user1_table | id | 2 | 10 + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 (1 row) SELECT * FROM pathman_config_params; - partrel | enable_parent | auto | init_callback | spawn_using_bgw --------------------------+---------------+------+---------------+----------------- - permissions.user1_table | f | t | | f + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f | t | | f (1 row) /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" -SELECT set_auto('permissions.user1_table', false); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" /* Should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; -WARNING: only the owner or superuser can change partitioning configuration of table 
"user1_table" +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" /* No rights to insert, should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); -ERROR: permission denied for parent relation "user1_table" -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); - prepend_range_partition ---------------------------- - permissions.user1_table_4 +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 (1 row) SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ - attname | attacl -----------+----------------- - a | {user2=w/user1} + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} cmax | cmin | ctid | @@ -113,8 +113,8 @@ ORDER BY attname; /* check ACL for each column */ (8 rows) /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; id | a ----+--- 35 | 0 @@ -122,76 +122,76 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ - relname | relacl ----------------+-------------------------------------- - user1_table_2 | {user1=arwdDxt/user1,user2=r/user1} - user1_table_5 | {user1=arwdDxt/user1,user2=ar/user1} - user1_table_6 | {user1=arwdDxt/user1,user2=ar/user1} +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+---------------------------------------------------------------------- + pathman_user1_table_2 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=r/pathman_user1} + 
pathman_user1_table_5 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} (3 rows) /* Try to drop partition, should fail */ DO $$ BEGIN - SELECT drop_range_partition('permissions.user1_table_4'); + SELECT drop_range_partition('permissions.pathman_user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); set_auto ---------- (1 row) /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; ERROR: no suitable partition for key '55' /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); -NOTICE: 10 rows copied from permissions.user1_table_1 -NOTICE: 10 rows copied from permissions.user1_table_2 -NOTICE: 0 rows copied from permissions.user1_table_4 -NOTICE: 0 rows copied from permissions.user1_table_5 -NOTICE: 1 rows copied from permissions.user1_table_6 +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 drop_partitions ----------------- 5 (1 row) /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); create_hash_partitions ------------------------ 3 (1 row) -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); -NOTICE: 9 rows copied from permissions.user2_table_0 -NOTICE: 11 rows copied from permissions.user2_table_1 -NOTICE: 10 rows copied from permissions.user2_table_2 +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 drop_partitions ----------------- 3 (1 row) /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); create_range_partitions ------------------------- @@ -203,11 +203,11 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY 
attrelid::regclass::text; /* check ACL for each column */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} (3 rows) ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ @@ -222,12 +222,12 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} (4 rows) ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ @@ -242,22 +242,22 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} - permissions.dropped_column_5 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} (5 rows) DROP TABLE permissions.dropped_column CASCADE; NOTICE: drop cascades to 6 other objects /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out index a50aa524..dc976aae 100644 --- a/expected/pathman_permissions_1.out +++ b/expected/pathman_permissions_1.out @@ -2,107 +2,107 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; 
-CREATE ROLE user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges -/* Grant SELECT to user2 */ -SET ROLE user1; -GRANT SELECT ON permissions.user1_table TO user2; +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); create_range_partitions ------------------------- 2 (1 row) /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; - partrel | expr | parttype | range_interval --------------------------+------+----------+---------------- - permissions.user1_table | id | 2 | 10 + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 (1 row) SELECT * FROM pathman_config_params; - partrel | enable_parent | auto | init_callback | spawn_using_bgw --------------------------+---------------+------+---------------+----------------- - permissions.user1_table | f | t | | f + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f | t | | f (1 row) /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" -SELECT set_auto('permissions.user1_table', false); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change partitioning 
configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" /* Should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" /* No rights to insert, should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); -ERROR: permission denied for parent relation "user1_table" -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); - prepend_range_partition ---------------------------- - permissions.user1_table_4 +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 (1 row) SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ - attname | attacl -----------+----------------- - a | {user2=w/user1} + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} cmax | cmin | ctid | @@ -113,8 +113,8 @@ ORDER BY attname; /* check ACL for each column */ (8 rows) /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; id | a ----+--- 35 | 0 @@ -122,76 +122,76 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ - relname | relacl ----------------+--------------------------------------- - user1_table_2 | {user1=arwdDxtm/user1,user2=r/user1} - user1_table_5 | 
{user1=arwdDxtm/user1,user2=ar/user1} - user1_table_6 | {user1=arwdDxtm/user1,user2=ar/user1} +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+----------------------------------------------------------------------- + pathman_user1_table_2 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=r/pathman_user1} + pathman_user1_table_5 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} (3 rows) /* Try to drop partition, should fail */ DO $$ BEGIN - SELECT drop_range_partition('permissions.user1_table_4'); + SELECT drop_range_partition('permissions.pathman_user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); set_auto ---------- (1 row) /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; ERROR: no suitable partition for key '55' /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); -NOTICE: 10 rows copied from permissions.user1_table_1 -NOTICE: 10 rows copied from permissions.user1_table_2 -NOTICE: 0 rows copied from permissions.user1_table_4 -NOTICE: 0 rows copied from permissions.user1_table_5 -NOTICE: 1 rows copied from permissions.user1_table_6 +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 drop_partitions ----------------- 5 (1 row) /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); create_hash_partitions ------------------------ 3 (1 row) -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); -NOTICE: 9 rows copied from permissions.user2_table_0 -NOTICE: 11 rows copied from permissions.user2_table_1 -NOTICE: 10 rows copied from permissions.user2_table_2 +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 drop_partitions ----------------- 3 (1 row) /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT 
SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); create_range_partitions ------------------------- @@ -203,11 +203,11 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} (3 rows) ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ @@ -222,12 +222,12 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} (4 rows) ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ @@ -242,22 +242,22 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} - permissions.dropped_column_5 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} (5 rows) DROP TABLE permissions.dropped_column CASCADE; NOTICE: drop cascades to 6 other objects /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; DROP 
SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql index e3730744..07daa617 100644 --- a/sql/pathman_CVE-2020-14350.sql +++ b/sql/pathman_CVE-2020-14350.sql @@ -8,24 +8,24 @@ DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; -DROP ROLE IF EXISTS regress_hacker; +DROP ROLE IF EXISTS pathman_regress_hacker; SET client_min_messages = 'notice'; GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; -CREATE ROLE regress_hacker LOGIN; +CREATE ROLE pathman_regress_hacker LOGIN; -- Test 1 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) RETURNS bigint AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; END $$ LANGUAGE plpgsql; @@ -39,20 +39,20 @@ SELECT partition_table_concurrently('test1', 10, 1); SELECT pg_sleep(1); -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; -- Test 2 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) RETURNS REGCLASS AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); END $$ LANGUAGE plpgsql; @@ -64,7 +64,7 @@ SELECT create_range_partitions('test2', 'i', 0, 1); INSERT INTO test2 values(1); -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; -- Cleanup @@ -73,6 +73,6 @@ DROP FUNCTION _partition_data_concurrent(oid,integer); DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); DROP TABLE test1 CASCADE; DROP TABLE test2 CASCADE; -DROP ROLE regress_hacker; +DROP ROLE pathman_regress_hacker; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 49e1fc18..3e2cf92a 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -4,137 +4,137 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; -CREATE ROLE user2 LOGIN; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE 
user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; -/* Grant SELECT to user2 */ -SET ROLE user1; -GRANT SELECT ON permissions.user1_table TO user2; +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; SELECT * FROM pathman_config_params; /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +SELECT set_auto('permissions.pathman_user1_table', false); /* Should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; +WHERE partrel = 'permissions.pathman_user1_table'::regclass; /* No rights to insert, should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT 
"partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ /* Try to drop partition, should fail */ DO $$ BEGIN - SELECT drop_range_partition('permissions.user1_table_4'); + SELECT drop_range_partition('permissions.pathman_user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); @@ -168,10 +168,10 @@ DROP TABLE permissions.dropped_column CASCADE; /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; DROP SCHEMA permissions; From 789e1117119d2347a8b86a3201359a8d45d98865 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Mon, 24 Jul 2023 11:05:37 +0300 Subject: [PATCH 508/528] PGPRO-8546: Create targetlist in partition filter and partition router nodes right with the parent_rti indexes. 
As suggested by Tom Lane:
https://www.postgresql.org/message-id/71315.1686243488%40sss.pgh.pa.us

Tags: pg_pathman
---
 src/include/partition_filter.h |  4 +---
 src/partition_filter.c         | 39 +++-------------------------------
 src/partition_overseer.c       |  2 +-
 src/partition_router.c         |  7 +-----
 4 files changed, 6 insertions(+), 46 deletions(-)

diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h
index 9b9f52f9..042b1d55 100644
--- a/src/include/partition_filter.h
+++ b/src/include/partition_filter.h
@@ -181,9 +181,7 @@ TupleConversionMap * build_part_tuple_map_child(Relation child_rel);
 
 void destroy_tuple_map(TupleConversionMap *tuple_map);
 
-List * pfilter_build_tlist(Plan *subplan);
-
-void pfilter_tlist_fix_resjunk(CustomScan *subplan);
+List * pfilter_build_tlist(Plan *subplan, Index varno);
 
 /* Find suitable partition using 'value' */
 Oid * find_partitions_for_value(Datum value, Oid value_type,
diff --git a/src/partition_filter.c b/src/partition_filter.c
index d4cf8308..4391bcf3 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -812,12 +812,7 @@ make_partition_filter(Plan *subplan,
 	cscan->scan.scanrelid = 0;
 
 	/* Build an appropriate target list */
-	cscan->scan.plan.targetlist = pfilter_build_tlist(subplan);
-
-	/* Prepare 'custom_scan_tlist' for EXPLAIN (VERBOSE) */
-	cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist);
-	ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0);
-	pfilter_tlist_fix_resjunk(cscan);
+	cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti);
 
 	/* Pack partitioned table's Oid and conflict_action */
 	cscan->custom_private = list_make4(makeInteger(parent_relid),
@@ -1076,7 +1071,7 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e
  * Build partition filter's target list pointing to subplan tuple's elements.
  */
 List *
-pfilter_build_tlist(Plan *subplan)
+pfilter_build_tlist(Plan *subplan, Index varno)
 {
 	List	   *result_tlist = NIL;
 	ListCell   *lc;
@@ -1096,7 +1091,7 @@ pfilter_build_tlist(Plan *subplan)
 	}
 	else
 	{
-		Var *var = makeVar(INDEX_VAR,	/* point to subplan's elements */
+		Var *var = makeVar(varno,	/* point to subplan's elements */
 						   tle->resno,
 						   exprType((Node *) tle->expr),
 						   exprTypmod((Node *) tle->expr),
@@ -1115,34 +1110,6 @@ pfilter_build_tlist(Plan *subplan)
 
 	return result_tlist;
 }
 
-/*
- * resjunk Vars had its varattnos being set on nonexisting relation columns.
- * For future processing service attributes should be indicated correctly.
- */
-void
-pfilter_tlist_fix_resjunk(CustomScan *css)
-{
-	ListCell   *lc;
-
-	foreach(lc, css->custom_scan_tlist)
-	{
-		TargetEntry *tle = (TargetEntry *) lfirst(lc);
-
-		if (!IsA(tle->expr, Const))
-		{
-			Var *var = (Var *) tle->expr;
-
-			if (tle->resjunk)
-			{
-				/* To make Var recognizable as service attribute.
*/ - var->varattno = -1; - } - } - } - - return; -} - /* * ---------------------------------------------- * Additional init steps for ResultPartsStorage diff --git a/src/partition_overseer.c b/src/partition_overseer.c index ffa770ba..d858374a 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -46,7 +46,7 @@ make_partition_overseer(Plan *subplan) cscan->scan.scanrelid = 0; /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, INDEX_VAR); cscan->custom_scan_tlist = subplan->targetlist; return &cscan->scan.plan; diff --git a/src/partition_router.c b/src/partition_router.c index 4a597a13..5f00e9b1 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -134,12 +134,7 @@ make_partition_router(Plan *subplan, int epq_param, Index parent_rti) cscan->scan.scanrelid = 0; /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); - - /* Fix 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ - cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); - ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); - pfilter_tlist_fix_resjunk(cscan); + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); return &cscan->scan.plan; } From 7ed25e40d3ace75277614e1ebfd870ead5148ef5 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Sep 2023 12:45:11 +0300 Subject: [PATCH 509/528] travis-ci for v16 --- .travis.yml | 2 ++ Dockerfile.tmpl | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 81a40e18..411c98aa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,8 @@ notifications: on_failure: always env: + - PG_VERSION=16 LEVEL=hardcore + - PG_VERSION=16 - PG_VERSION=15 LEVEL=hardcore - PG_VERSION=15 - PG_VERSION=14 LEVEL=hardcore diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 309719de..4dd24ca5 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -9,7 +9,7 @@ RUN apk add --no-cache \ coreutils linux-headers \ make musl-dev gcc bison flex \ zlib-dev libedit-dev \ - clang clang15 clang-analyzer; + pkgconf icu-dev clang clang15 clang-analyzer; # Install fresh valgrind RUN apk add valgrind \ From 34430a5277e560ce1ccf84405357105e713b9b37 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Sep 2023 14:21:22 +0300 Subject: [PATCH 510/528] core patch for v16 --- patches/REL_16_STABLE-pg_pathman-core.diff | 547 +++++++++++++++++++++ 1 file changed, 547 insertions(+) create mode 100644 patches/REL_16_STABLE-pg_pathman-core.diff diff --git a/patches/REL_16_STABLE-pg_pathman-core.diff b/patches/REL_16_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..63d88a38 --- /dev/null +++ b/patches/REL_16_STABLE-pg_pathman-core.diff @@ -0,0 +1,547 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 37c5e34cce..d4bad64db1 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -79,7 +79,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel = XACT_READ_COMMITTED; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + 
bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index 851946a927..32758378c7 100644 +--- a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1845,6 +1845,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index 4c5a7bbf62..7d638aa22d 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -561,6 +561,39 @@ ExecutorRewind(QueryDesc *queryDesc) + } + + ++/* ++ * ExecCheckOneRtePermissions ++ * Check access permissions for one RTE ++ * ++ * Returns true if permissions are adequate. Otherwise, throws an appropriate ++ * error if ereport_on_violation is true, or simply returns false otherwise. ++ * ++ * This function uses pg_pathman due to commit f75cec4fff, see PGPRO-7792 ++ */ ++bool ++ExecCheckOneRtePermissions(RangeTblEntry *rte, RTEPermissionInfo *perminfo, ++ bool ereport_on_violation) ++{ ++ bool result = true; ++ ++ Assert(OidIsValid(perminfo->relid)); ++ Assert(rte->relid == perminfo->relid); ++ ++ result = ExecCheckOneRelPerms(perminfo); ++ ++ if (!result) ++ { ++ if (ereport_on_violation) ++ aclcheck_error(ACLCHECK_NO_PRIV, ++ get_relkind_objtype(get_rel_relkind(perminfo->relid)), ++ get_rel_name(perminfo->relid)); ++ return false; ++ } ++ ++ return result; ++} ++ ++ + /* + * ExecCheckPermissions + * Check access permissions of relations mentioned in a query +@@ -856,6 +889,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2873,6 +2913,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_output_cid = parentestate->es_output_cid; + rcestate->es_queryEnv = parentestate->es_queryEnv; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. 
+diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index 5005d8c0d1..e664848393 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -660,6 +660,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + ++void ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3550,6 +3557,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3591,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3598,6 +3608,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3631,7 +3649,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? ++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3668,6 +3688,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3677,6 +3699,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3707,7 +3730,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3755,7 +3779,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ +@@ -3786,9 +3811,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3796,6 +3824,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3819,14 +3854,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL); + break; + +@@ -3844,7 +3884,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. + */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3860,6 +3903,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3934,6 +3978,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -4035,6 +4080,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4117,6 +4169,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + } + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. 
It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 011ec18015..7b4fcb2807 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 7d3b9446e6..20030111f4 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index ac02247947..c39ae13a8e 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -208,6 +208,9 @@ extern void standard_ExecutorFinish(QueryDesc *queryDesc); + extern void ExecutorEnd(QueryDesc *queryDesc); + extern void standard_ExecutorEnd(QueryDesc *queryDesc); + extern void ExecutorRewind(QueryDesc *queryDesc); ++extern bool ExecCheckOneRtePermissions(RangeTblEntry *rte, ++ RTEPermissionInfo *perminfo, ++ bool ereport_on_violation); + extern bool ExecCheckPermissions(List *rangeTable, + List *rteperminfos, bool ereport_on_violation); + extern void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation); +@@ -676,5 +679,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION ++/* ++ * This function is static in vanilla, but pg_pathman wants it exported. ++ * We cannot make it extern with the same name to avoid compilation errors ++ * in timescaledb, which ships it's own static copy of the same function. ++ * So, export ExecInitUpdateProjection with Pgpro prefix. ++ * ++ * The define above helps pg_pathman to expect proper exported symbol ++ * from various versions of pgpro. 
++ */ ++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index cb714f4a19..d34a103fc6 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -638,6 +638,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 05548d7c0a..37754370e0 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,22 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub lcopy + { + my $src = shift; +@@ -580,7 +596,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 9e05eb91b1..baedbb784a 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -40,7 +40,7 @@ my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -979,6 +979,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1082,6 +1083,22 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1106,23 +1123,59 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ if ( -f "contrib/$n/$d.in" ) ++ { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } ++ else + { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ # Search for makefile rule. ++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) ++ { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) ++ { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) ++ { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } From ceeeaa66e53bb72b9248acaf0ad05835a62ad140 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Sep 2023 16:36:37 +0300 Subject: [PATCH 511/528] Corrected some functions for v16 --- tests/cmocka/missing_basic.c | 17 +++++++++++++---- tests/cmocka/missing_stringinfo.c | 8 +++++++- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c index 36d76160..d20eb87f 100644 --- a/tests/cmocka/missing_basic.c +++ b/tests/cmocka/missing_basic.c @@ -24,20 +24,29 @@ pfree(void *pointer) void ExceptionalCondition(const char *conditionName, +#if PG_VERSION_NUM < 160000 const char *errorType, +#endif const char *fileName, int lineNumber) { - if (!PointerIsValid(conditionName) || - !PointerIsValid(fileName) || - !PointerIsValid(errorType)) + if (!PointerIsValid(conditionName) || !PointerIsValid(fileName) +#if PG_VERSION_NUM < 160000 + || !PointerIsValid(errorType) +#endif + ) { printf("TRAP: ExceptionalCondition: bad arguments\n"); } else { printf("TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n", - errorType, conditionName, +#if PG_VERSION_NUM < 160000 + errorType, +#else + "", +#endif + conditionName, fileName, lineNumber); } diff --git a/tests/cmocka/missing_stringinfo.c b/tests/cmocka/missing_stringinfo.c index edf4d8a4..80710a4e 100644 --- a/tests/cmocka/missing_stringinfo.c +++ b/tests/cmocka/missing_stringinfo.c @@ -206,7 +206,13 @@ appendStringInfoSpaces(StringInfo str, int count) * if necessary. 
*/ void -appendBinaryStringInfo(StringInfo str, const char *data, int datalen) +appendBinaryStringInfo(StringInfo str, +#if PG_VERSION_NUM < 160000 + const char *data, +#else + const void *data, +#endif + int datalen) { Assert(str != NULL); From db938cca85e7dc42ee7090a5d9e12774e2cee782 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Wed, 27 Sep 2023 18:25:39 +0300 Subject: [PATCH 512/528] PGPRO-8546: Add core patch for REL_11_STABLE. Don't generate deforming jit code for tuples without user attributes. Without this patch an "ERROR: unknown alignment" may occur during jit compilation. Tags: pg_pathman --- README.md | 4 +- patches/REL_11_STABLE-pg_pathman-core.diff | 53 ++++++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 patches/REL_11_STABLE-pg_pathman-core.diff diff --git a/README.md b/README.md index 43d585ff..1394bc6f 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,8 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: - * PostgreSQL 11, 12, 13; - * PostgreSQL with core-patch: 14, 15; + * PostgreSQL 12, 13; + * PostgreSQL with core-patch: 11, 14, 15; * Postgres Pro Standard 11, 12, 13, 14, 15; * Postgres Pro Enterprise; diff --git a/patches/REL_11_STABLE-pg_pathman-core.diff b/patches/REL_11_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b3b08e0a --- /dev/null +++ b/patches/REL_11_STABLE-pg_pathman-core.diff @@ -0,0 +1,53 @@ +diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c +index 6384ac940d8..8b4f731e7a8 100644 +--- a/src/backend/jit/llvm/llvmjit_deform.c ++++ b/src/backend/jit/llvm/llvmjit_deform.c +@@ -104,6 +104,10 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, int natts) + + int attnum; + ++ /* don't generate code for tuples without user attributes */ ++ if (desc->natts == 0) ++ return NULL; ++ + mod = llvm_mutable_module(context); + + funcname = llvm_expand_funcname(context, "deform"); +diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c +index 12138e49577..8638ebc4ba1 100644 +--- a/src/backend/jit/llvm/llvmjit_expr.c ++++ b/src/backend/jit/llvm/llvmjit_expr.c +@@ -274,6 +274,7 @@ llvm_compile_expr(ExprState *state) + LLVMValueRef v_slot; + LLVMBasicBlockRef b_fetch; + LLVMValueRef v_nvalid; ++ LLVMValueRef l_jit_deform = NULL; + + b_fetch = l_bb_before_v(opblocks[i + 1], + "op.%d.fetch", i); +@@ -336,17 +337,20 @@ llvm_compile_expr(ExprState *state) + */ + if (desc && (context->base.flags & PGJIT_DEFORM)) + { +- LLVMValueRef params[1]; +- LLVMValueRef l_jit_deform; +- + l_jit_deform = +- slot_compile_deform(context, desc, ++ slot_compile_deform(context, ++ desc, + op->d.fetch.last_var); ++ } ++ ++ if (l_jit_deform) ++ { ++ LLVMValueRef params[1]; ++ + params[0] = v_slot; + + LLVMBuildCall(b, l_jit_deform, + params, lengthof(params), ""); +- + } + else + { From 35ab52f1f86795daf2992012e142ac84166116bf Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 3 Nov 2023 04:09:31 +0300 Subject: [PATCH 513/528] [PGPRO-9137] Fix for vanilla commits 178ee1d858, b1444a09dc Tags: pg_pathman --- expected/pathman_update_triggers_1.out | 198 +++++++++++++++++++++++++ src/include/partition_filter.h | 5 + src/partition_filter.c | 30 +++- 3 files changed, 232 insertions(+), 1 deletion(-) create mode 100644 expected/pathman_update_triggers_1.out diff --git a/expected/pathman_update_triggers_1.out b/expected/pathman_update_triggers_1.out new file mode 100644 index 
00000000..5d26ac1e --- /dev/null +++ b/expected/pathman_update_triggers_1.out @@ -0,0 +1,198 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) 
+NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW 
(test_2) +NOTICE: AFTER UPDATE ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 042b1d55..4aae0bbb 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -119,6 +119,11 @@ typedef struct CmdType command_type; TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ + +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + Index parent_rti; /* Parent RT index for use of EXPLAIN, + see "ModifyTable::nominalRelation" */ +#endif } PartitionFilterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 4391bcf3..3d5e4bd3 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -815,10 +815,18 @@ make_partition_filter(Plan *subplan, cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); /* Pack partitioned table's Oid and conflict_action */ +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + cscan->custom_private = list_make5(makeInteger(parent_relid), + makeInteger(conflict_action), + returning_list, + makeInteger(command_type), + makeInteger(parent_rti)); +#else cscan->custom_private = list_make4(makeInteger(parent_relid), makeInteger(conflict_action), returning_list, makeInteger(command_type)); +#endif return &cscan->scan.plan; } @@ -841,6 +849,9 @@ partition_filter_create_scan_state(CustomScan *node) state->on_conflict_action = intVal(lsecond(node->custom_private)); state->returning_list = (List *) lthird(node->custom_private); state->command_type = (CmdType) intVal(lfourth(node->custom_private)); +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + state->parent_rti = (Index) intVal(lfirst(list_nth_cell(node->custom_private, 4))); +#endif /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -875,7 +886,24 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) RPS_RRI_CB(NULL, NULL)); #if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ /* ResultRelInfo of partitioned table. 
*/ - state->result_parts.init_rri = current_rri; + { + RangeTblEntry *rte = rt_fetch(current_rri->ri_RangeTableIndex, estate->es_range_table); + + if (rte->perminfoindex > 0) + state->result_parts.init_rri = current_rri; + else + { + /* + * Additional changes for 178ee1d858d: we cannot use current_rri + * because RTE for this ResultRelInfo has perminfoindex = 0. Need + * to use parent_rti (modify_table->nominalRelation) instead. + */ + Assert(state->parent_rti > 0); + state->result_parts.init_rri = estate->es_result_relations[state->parent_rti - 1]; + if (!state->result_parts.init_rri) + elog(ERROR, "cannot determine result info for partitioned table"); + } + } #endif } From 51edb67c59eb34187b841970833e55a8a9de4c9d Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 29 Nov 2023 18:41:37 +0300 Subject: [PATCH 514/528] [PGPRO-9251] Added new parameter PG_TEST_SKIP PG_TEST_SKIP parameter is used similarly to PG_TEST_EXTRA and is intended for skip pg_pathman regression tests. Tags: pg_pathman --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c3fe4038..b8a683a3 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,7 @@ DATA = pg_pathman--1.0--1.1.sql \ PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" +ifneq (pg_pathman,$(filter pg_pathman,$(PG_TEST_SKIP))) REGRESS = pathman_array_qual \ pathman_basic \ pathman_bgw \ @@ -63,7 +64,7 @@ REGRESS = pathman_array_qual \ pathman_utility_stmt \ pathman_views \ pathman_CVE-2020-14350 - +endif EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add From 9283ab7e4996cac120e78987ae7d0e69124815df Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 15 Dec 2023 00:58:19 +0300 Subject: [PATCH 515/528] [PGPRO-9334] Corrections for isolation tests Tags: pg_pathman --- Makefile | 2 +- expected/for_update.out | 28 +- expected/insert_nodes.out | 130 +++-- expected/rollback_on_create_partitions.out | 618 ++++++++++++++------- specs/for_update.spec | 2 - specs/insert_nodes.spec | 5 +- specs/rollback_on_create_partitions.spec | 2 +- 7 files changed, 509 insertions(+), 278 deletions(-) diff --git a/Makefile b/Makefile index c3fe4038..ce7a3b0c 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ ISOLATIONCHECKS=insert_nodes for_update rollback_on_create_partitions submake-isolation: $(MAKE) -C $(top_builddir)/src/test/isolation all -isolationcheck: | submake-isolation +isolationcheck: | submake-isolation temp-install $(MKDIR_P) isolation_output $(pg_isolation_regress_check) \ --temp-config=$(top_srcdir)/$(subdir)/conf.add \ diff --git a/expected/for_update.out b/expected/for_update.out index 3e41031e..ffd425e4 100644 --- a/expected/for_update.out +++ b/expected/for_update.out @@ -2,37 +2,49 @@ Parsed test spec with 2 sessions starting permutation: s1_b s1_update s2_select s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select: select * from test_tbl where id = 1; -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 step s1_r: rollback; starting permutation: s1_b s1_update s2_select_locked s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_r: rollback; step s2_select_locked: <... 
completed> -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 starting permutation: s1_b s1_update s2_select_locked s1_c create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_c: commit; step s2_select_locked: <... completed> -id val +id|val +--+--- +(0 rows) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index 64758aef..5ff8d63d 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -2,122 +2,144 @@ Parsed test spec with 2 sessions starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +(4 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +(4 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +(4 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 
201)) - -((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; step s2r: ROLLBACK; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) diff --git a/expected/rollback_on_create_partitions.out b/expected/rollback_on_create_partitions.out index 3531107d..ee0c7c0f 100644 --- a/expected/rollback_on_create_partitions.out +++ b/expected/rollback_on_create_partitions.out @@ -5,64 +5,72 @@ step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS 
OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data create_partitions show_rel commit show_rel step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback show_rel step begin: BEGIN; @@ -70,23 +78,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 
10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c commit show_rel step begin: BEGIN; @@ -94,23 +118,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel rollback show_rel step begin: BEGIN; @@ -118,34 +158,50 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - 
-> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel commit show_rel step begin: BEGIN; @@ -153,44 +209,60 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: 
begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel rollback show_rel step begin: BEGIN; @@ -198,28 +270,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel commit show_rel step begin: BEGIN; @@ -227,28 +316,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on 
range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel rollback show_rel step begin: BEGIN; @@ -256,32 +362,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel commit show_rel step begin: BEGIN; @@ -289,32 +424,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 
+s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel rollback show_rel step begin: BEGIN; @@ -322,37 +486,55 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on 
range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel commit show_rel step begin: BEGIN; @@ -360,44 +542,62 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + diff --git a/specs/for_update.spec b/specs/for_update.spec index f7a8f758..c18cd4f8 100644 --- a/specs/for_update.spec +++ b/specs/for_update.spec @@ -19,8 +19,6 @@ step 
"s1_r" { rollback; } step "s1_update" { update test_tbl set id = 2 where id = 1; } session "s2" -step "s2_b" { begin; } -step "s2_c" { commit; } step "s2_select_locked" { select * from test_tbl where id = 1 for share; } step "s2_select" { select * from test_tbl where id = 1; } diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 3bb67746..5ceea0d4 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -17,18 +17,17 @@ session "s1" step "s1b" { BEGIN; } step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s1_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step "s1_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; } step "s1r" { ROLLBACK; } -step "s1c" { COMMIT; } session "s2" step "s2b" { BEGIN; } step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s2_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step "s2_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; } diff --git a/specs/rollback_on_create_partitions.spec b/specs/rollback_on_create_partitions.spec index a24c2897..806e6072 100644 --- a/specs/rollback_on_create_partitions.spec +++ b/specs/rollback_on_create_partitions.spec @@ -22,7 +22,7 @@ step "rollback_a" { ROLLBACK TO SAVEPOINT a; } step "savepoint_b" { SAVEPOINT b; } step "rollback_b" { ROLLBACK TO SAVEPOINT b; } step "savepoint_c" { SAVEPOINT c; } -step "show_rel" { EXPLAIN (COSTS OFF) SELECT * FROM range_rel; } +step "show_rel" { SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; } permutation "begin" "insert_data" "create_partitions" "show_rel" "rollback" "show_rel" From 47b75b0d57a720eaf0e6f732bd6a4f692aea904b Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 15 Dec 2023 13:55:41 +0300 Subject: [PATCH 516/528] [PGPRO-9334] Enable isolation tests Tags: pg_pathman --- .gitignore | 1 - Makefile | 26 ++++++++++------------ expected/insert_nodes.out | 46 ++++++++++++--------------------------- specs/insert_nodes.spec | 4 ++-- 4 files changed, 28 insertions(+), 49 deletions(-) diff --git a/.gitignore b/.gitignore index f627990d..1bc422a5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ .deps -isolation_output results/* regression.diffs regression.out diff --git a/Makefile b/Makefile index ce7a3b0c..004f747f 100644 --- a/Makefile +++ b/Makefile @@ -64,11 +64,13 @@ REGRESS = pathman_array_qual \ pathman_views \ pathman_CVE-2020-14350 +ISOLATION = insert_nodes for_update rollback_on_create_partitions -EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add +REGRESS_OPTS = --temp-config $(top_srcdir)/$(subdir)/conf.add +ISOLATION_OPTS = --temp-config $(top_srcdir)/$(subdir)/conf.add CMOCKA_EXTRA_CLEAN = missing_basic.o missing_list.o missing_stringinfo.o missing_bitmapset.o rangeset_tests.o rangeset_tests -EXTRA_CLEAN = ./isolation_output $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN)) +EXTRA_CLEAN = $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN)) ifdef USE_PGXS PG_CONFIG=pg_config @@ -83,6 +85,14 @@ OBJS += src/declarative.o 
override PG_CPPFLAGS += -DENABLE_DECLARATIVE endif +# We cannot run isolation test for versions 12,13 in PGXS case +# because 'pg_isolation_regress' is not copied to install +# directory, see src/test/isolation/Makefile +ifeq ($(VNUM),$(filter 12% 13%,$(VNUM))) +undefine ISOLATION +undefine ISOLATION_OPTS +endif + include $(PGXS) else subdir = contrib/pg_pathman @@ -94,18 +104,6 @@ endif $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ -ISOLATIONCHECKS=insert_nodes for_update rollback_on_create_partitions - -submake-isolation: - $(MAKE) -C $(top_builddir)/src/test/isolation all - -isolationcheck: | submake-isolation temp-install - $(MKDIR_P) isolation_output - $(pg_isolation_regress_check) \ - --temp-config=$(top_srcdir)/$(subdir)/conf.add \ - --outputdir=./isolation_output \ - $(ISOLATIONCHECKS) - python_tests: $(MAKE) -C tests/python partitioning_tests CASE=$(CASE) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index 5ff8d63d..8f725216 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -11,30 +11,26 @@ step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -(4 rows) +(2 rows) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -(4 rows) +(2 rows) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions @@ -48,32 +44,27 @@ step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -(4 rows) +(2 rows) step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s2c: COMMIT; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions @@ 
-87,34 +78,28 @@ step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions @@ -131,15 +116,12 @@ step s1r: ROLLBACK; step s2r: ROLLBACK; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 5ceea0d4..a5d0c7f9 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -19,7 +19,7 @@ step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } step "s1_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; } step "s1r" { ROLLBACK; } @@ -29,7 +29,7 @@ step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } step "s2_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; } step "s2r" { ROLLBACK; } step "s2c" { COMMIT; } From 1857bde09f87f168ae1e218a92f337e471dba98b Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Dec 2023 19:15:08 +0300 Subject: [PATCH 517/528] Correction for docker-compose.yml --- docker-compose.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 471ab779..0544d859 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,2 +1,3 @@ -tests: +services: + tests: build: . 
From f5605c5dc340753410e958c6b1852691d87ec67a Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Mar 2024 12:40:03 +0300 Subject: [PATCH 518/528] [PGPRO-9977] Added new expected results after vanilla commit Tags: pg_pathman See b262ad440ede - Add better handling of redundant IS [NOT] NULL quals --- expected/pathman_hashjoin_6.out | 75 ++++++++++++++++++++++++++++++ expected/pathman_mergejoin_6.out | 80 ++++++++++++++++++++++++++++++++ 2 files changed, 155 insertions(+) create mode 100644 expected/pathman_hashjoin_6.out create mode 100644 expected/pathman_mergejoin_6.out diff --git a/expected/pathman_hashjoin_6.out b/expected/pathman_hashjoin_6.out new file mode 100644 index 00000000..1c57f49b --- /dev/null +++ b/expected/pathman_hashjoin_6.out @@ -0,0 +1,75 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 +(11 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_6.out b/expected/pathman_mergejoin_6.out new file mode 100644 index 00000000..0cca2aef --- /dev/null +++ b/expected/pathman_mergejoin_6.out @@ -0,0 +1,80 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * 
different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(10 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; From 07f0a98b060ed85fc52847b61654c6576dd2a586 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Mar 2024 14:43:30 +0300 Subject: [PATCH 519/528] [PGPRO-9977] Updated patches for v15, v16 Tags: pg_pathman --- patches/REL_15_STABLE-pg_pathman-core.diff | 52 +++++++++++----------- patches/REL_16_STABLE-pg_pathman-core.diff | 50 ++++++++++----------- 2 files changed, 51 insertions(+), 51 deletions(-) diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index 04fae9aa..b8db29fd 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -11,7 +11,7 @@ index bbf220407b..9a82a2db04 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index d0e5bc26a7..5ca196518e 100644 +index 7a3d9b4b01..0c3d2dec6c 
100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index d0e5bc26a7..5ca196518e 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index d5e46098c2..d3c02c1def 100644 +index 87c7603f2b..9cc0bc0da8 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -45,7 +45,7 @@ index d5e46098c2..d3c02c1def 100644 return state->resvalue; } diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c -index ef2fd46092..8551733c55 100644 +index 0ba61fd547..29d93998b2 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) @@ -62,7 +62,7 @@ index ef2fd46092..8551733c55 100644 /* * Next, build the ExecRowMark array from the PlanRowMark(s), if any. */ -@@ -2811,6 +2818,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) +@@ -2849,6 +2856,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) rcestate->es_junkFilter = parentestate->es_junkFilter; rcestate->es_output_cid = parentestate->es_output_cid; @@ -77,7 +77,7 @@ index ef2fd46092..8551733c55 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 2f6e66b641..d4a1e48c20 100644 +index 1ad5dcb406..047508e0da 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -641,6 +641,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, @@ -94,7 +94,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3524,6 +3531,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) HeapTupleData oldtupdata; HeapTuple oldtuple; ItemPointer tupleid; @@ -102,7 +102,7 @@ index 2f6e66b641..d4a1e48c20 100644 CHECK_FOR_INTERRUPTS(); -@@ -3565,6 +3573,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -111,7 +111,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3572,6 +3582,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -126,7 +126,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -3605,7 +3623,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -137,7 +137,7 @@ index 2f6e66b641..d4a1e48c20 100644 &isNull); if (isNull) { -@@ -3642,6 +3662,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -146,7 +146,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3651,6 +3673,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -154,7 +154,7 @@ index 2f6e66b641..d4a1e48c20 100644 return slot; } -@@ -3681,7 +3704,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -164,7 +164,7 @@ index 2f6e66b641..d4a1e48c20 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3729,7 +3753,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -174,7 +174,7 @@ index 2f6e66b641..d4a1e48c20 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -3760,9 +3785,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -190,7 +190,7 @@ index 2f6e66b641..d4a1e48c20 100644 break; case CMD_UPDATE: -@@ -3770,6 +3798,13 @@ ExecModifyTable(PlanState *pstate) +@@ -3827,6 +3855,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -204,7 +204,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Make the new tuple by combining plan's output tuple with * the old tuple being updated. -@@ -3793,14 +3828,19 @@ ExecModifyTable(PlanState *pstate) +@@ -3850,14 +3885,19 @@ ExecModifyTable(PlanState *pstate) slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, oldSlot); context.relaction = NULL; @@ -223,10 +223,10 @@ index 2f6e66b641..d4a1e48c20 100644 + slot = ExecDelete(&context, estate->es_result_relation_info ? + estate->es_result_relation_info : resultRelInfo, + tupleid, oldtuple, - true, false, node->canSetTag, NULL, NULL); + true, false, node->canSetTag, NULL, NULL, NULL); break; -@@ -3818,7 +3858,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3875,7 +3915,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. 
*/ if (slot) @@ -237,7 +237,7 @@ index 2f6e66b641..d4a1e48c20 100644 } /* -@@ -3834,6 +3877,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3891,6 +3934,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -245,7 +245,7 @@ index 2f6e66b641..d4a1e48c20 100644 return NULL; } -@@ -3908,6 +3952,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3965,6 +4009,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -253,7 +253,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -4008,6 +4053,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4067,6 +4112,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -267,7 +267,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Now we may initialize the subplan. */ -@@ -4102,6 +4154,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4161,6 +4213,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecInitStoredGenerated(resultRelInfo, estate, operation); } @@ -303,10 +303,10 @@ index 8d46a781bb..150d70cb64 100644 /* flag for logging statements in this transaction */ diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h -index 82925b4b63..de23622ca2 100644 +index 7cd9b2f2bf..b31a7934a4 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h -@@ -659,5 +659,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, +@@ -662,5 +662,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache); @@ -325,7 +325,7 @@ index 82925b4b63..de23622ca2 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index f34d06eff4..0970e5f110 100644 +index 9f176b0e37..a65799dcce 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -624,6 +624,12 @@ typedef struct EState @@ -374,7 +374,7 @@ index 8de79c618c..c9226ba5ad 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index ef0a33c10f..27033b0a45 100644 +index 990c223a9b..cd5048f8d5 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -39,8 +39,8 @@ my $contrib_defines = {}; diff --git a/patches/REL_16_STABLE-pg_pathman-core.diff b/patches/REL_16_STABLE-pg_pathman-core.diff index 63d88a38..50dad389 100644 --- a/patches/REL_16_STABLE-pg_pathman-core.diff +++ b/patches/REL_16_STABLE-pg_pathman-core.diff @@ -11,7 +11,7 @@ index bbf220407b..9a82a2db04 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index 37c5e34cce..d4bad64db1 100644 +index 4a2ea4adba..7cadde5499 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -79,7 +79,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index 37c5e34cce..d4bad64db1 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index 851946a927..32758378c7 100644 +index 6b7997465d..5e9e878d3b 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1845,6 +1845,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -117,7 +117,7 @@ index 
4c5a7bbf62..7d638aa22d 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 5005d8c0d1..e664848393 100644 +index c84caeeaee..2a355607e9 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -660,6 +660,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, @@ -134,7 +134,7 @@ index 5005d8c0d1..e664848393 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3550,6 +3557,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3570,6 +3577,7 @@ ExecModifyTable(PlanState *pstate) HeapTupleData oldtupdata; HeapTuple oldtuple; ItemPointer tupleid; @@ -142,7 +142,7 @@ index 5005d8c0d1..e664848393 100644 CHECK_FOR_INTERRUPTS(); -@@ -3591,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3611,6 +3619,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -151,7 +151,7 @@ index 5005d8c0d1..e664848393 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3598,6 +3608,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3618,6 +3628,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -166,7 +166,7 @@ index 5005d8c0d1..e664848393 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly -@@ -3631,7 +3649,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3651,7 +3669,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -177,7 +177,7 @@ index 5005d8c0d1..e664848393 100644 &isNull); if (isNull) { -@@ -3668,6 +3688,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3688,6 +3708,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -186,7 +186,7 @@ index 5005d8c0d1..e664848393 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3677,6 +3699,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3697,6 +3719,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -194,7 +194,7 @@ index 5005d8c0d1..e664848393 100644 return slot; } -@@ -3707,7 +3730,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3727,7 +3750,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -204,7 +204,7 @@ index 5005d8c0d1..e664848393 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3755,7 +3779,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3775,7 +3799,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -214,7 +214,7 @@ index 5005d8c0d1..e664848393 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -3786,9 +3811,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3806,9 +3831,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -230,7 +230,7 @@ index 5005d8c0d1..e664848393 100644 break; case CMD_UPDATE: -@@ -3796,6 +3824,13 @@ ExecModifyTable(PlanState *pstate) +@@ -3816,6 +3844,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -244,7 +244,7 @@ index 5005d8c0d1..e664848393 100644 /* * Make the new tuple by combining plan's output tuple with * the old tuple being updated. -@@ -3819,14 +3854,19 @@ ExecModifyTable(PlanState *pstate) +@@ -3839,14 +3874,19 @@ ExecModifyTable(PlanState *pstate) slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, oldSlot); context.relaction = NULL; @@ -263,10 +263,10 @@ index 5005d8c0d1..e664848393 100644 + slot = ExecDelete(&context, estate->es_result_relation_info ? + estate->es_result_relation_info : resultRelInfo, + tupleid, oldtuple, - true, false, node->canSetTag, NULL, NULL); + true, false, node->canSetTag, NULL, NULL, NULL); break; -@@ -3844,7 +3884,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3864,7 +3904,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -277,7 +277,7 @@ index 5005d8c0d1..e664848393 100644 } /* -@@ -3860,6 +3903,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3880,6 +3923,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -285,7 +285,7 @@ index 5005d8c0d1..e664848393 100644 return NULL; } -@@ -3934,6 +3978,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3954,6 +3998,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -293,7 +293,7 @@ index 5005d8c0d1..e664848393 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -4035,6 +4080,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4056,6 +4101,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -307,7 +307,7 @@ index 5005d8c0d1..e664848393 100644 /* * Now we may initialize the subplan. 
*/ -@@ -4117,6 +4169,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4138,6 +4190,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) } } @@ -375,7 +375,7 @@ index ac02247947..c39ae13a8e 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index cb714f4a19..d34a103fc6 100644 +index 869465d6f8..6bdde351d7 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -638,6 +638,12 @@ typedef struct EState @@ -428,7 +428,7 @@ index 05548d7c0a..37754370e0 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index 9e05eb91b1..baedbb784a 100644 +index 6a79a0e037..93696f53ae 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -40,7 +40,7 @@ my @contrib_uselibpq = (); @@ -440,7 +440,7 @@ index 9e05eb91b1..baedbb784a 100644 my $contrib_extrasource = {}; my @contrib_excludes = ( 'bool_plperl', 'commit_ts', -@@ -979,6 +979,7 @@ sub AddContrib +@@ -980,6 +980,7 @@ sub AddContrib my $dn = $1; my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); $proj->AddReference($postgres); @@ -448,7 +448,7 @@ index 9e05eb91b1..baedbb784a 100644 AdjustContribProj($proj); push @projects, $proj; } -@@ -1082,6 +1083,22 @@ sub AddContrib +@@ -1083,6 +1084,22 @@ sub AddContrib return; } @@ -471,7 +471,7 @@ index 9e05eb91b1..baedbb784a 100644 sub GenerateContribSqlFiles { my $n = shift; -@@ -1106,23 +1123,59 @@ sub GenerateContribSqlFiles +@@ -1107,23 +1124,59 @@ sub GenerateContribSqlFiles substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1); } From ba5c4c790074d5c923f6eb08ea46792c48a38f49 Mon Sep 17 00:00:00 2001 From: Svetlana Derevyanko Date: Thu, 28 Mar 2024 11:37:28 +0300 Subject: [PATCH 520/528] [PGPRO-9874] Added check on SearchSysCache returning NULL Tags: pg_pathman --- src/partition_creation.c | 3 ++- src/relation_info.c | 15 +++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index eb438b91..a0bdaa55 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -606,7 +606,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* Get typname of range_bound_type to perform cast */ typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(range_bound_type)); - Assert(HeapTupleIsValid(typeTuple)); + if (!HeapTupleIsValid(typeTuple)) + elog(ERROR, "cache lookup failed for type %u", range_bound_type); typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); ReleaseSysCache(typeTuple); diff --git a/src/relation_info.c b/src/relation_info.c index e3ba540c..db75646f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1167,7 +1167,7 @@ invalidate_bounds_cache(void) /* * Get constraint expression tree of a partition. * - * build_check_constraint_name_internal() is used to build conname. + * build_check_constraint_name_relid_internal() is used to build conname. 
*/ Expr * get_partition_constraint_expr(Oid partition, bool raise_error) @@ -1193,6 +1193,16 @@ get_partition_constraint_expr(Oid partition, bool raise_error) } con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); + if (!HeapTupleIsValid(con_tuple)) + { + if (!raise_error) + return NULL; + + ereport(ERROR, + (errmsg("cache lookup failed for constraint \"%s\" of partition \"%s\"", + conname, get_rel_name_or_relid(partition)))); + } + conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conbin, &conbin_isnull); @@ -1204,9 +1214,6 @@ get_partition_constraint_expr(Oid partition, bool raise_error) ereport(ERROR, (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", conname, get_rel_name_or_relid(partition)))); - pfree(conname); - - return NULL; /* could not parse */ } pfree(conname); From eab5f7d2b1bc952d7aa452fbe01d87861a84f731 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 3 Apr 2024 14:35:37 +0300 Subject: [PATCH 521/528] [PGPRO-9977] Fix after vanilla commit 5f2e179bd31e Tags: pg_pathman --- src/include/compat/pg_compat.h | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 5a12b528..2cc9e96d 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -119,7 +119,10 @@ /* * CheckValidResultRel() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 170000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd), NIL) +#elif PG_VERSION_NUM >= 100000 #define CheckValidResultRelCompat(rri, cmd) \ CheckValidResultRel((rri), (cmd)) #elif PG_VERSION_NUM >= 90500 @@ -237,18 +240,6 @@ #endif -/* - * CheckValidResultRel() - */ -#if PG_VERSION_NUM >= 100000 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri), (cmd)) -#elif PG_VERSION_NUM >= 90500 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) -#endif - - /* * create_append_path() */ From 5376dfba1b459de1935982964b2ba94a03fdcd6b Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Wed, 17 Apr 2024 10:47:46 +0300 Subject: [PATCH 522/528] PGPRO-9797: Temporary disable test pathman_upd_del.sql To be fixed in PGPRO-10100. 
Tags: joinsel --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index f6780044..f32398da 100644 --- a/Makefile +++ b/Makefile @@ -64,6 +64,8 @@ REGRESS = pathman_array_qual \ pathman_utility_stmt \ pathman_views \ pathman_CVE-2020-14350 + +REGRESS := $(filter-out pathman_upd_del, $(REGRESS)) endif ISOLATION = insert_nodes for_update rollback_on_create_partitions From f03128e83bd959756c191f27307615f28a042545 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 20 May 2024 14:25:13 +0300 Subject: [PATCH 523/528] Pgindent fixes --- src/include/compat/pg_compat.h | 82 +++++++++++++++++----------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 2cc9e96d..f6330627 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -65,11 +65,11 @@ */ #if PG_VERSION_NUM >= 110000 #define calc_nestloop_required_outer_compat(outer, inner) \ - calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ + calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ (inner)->parent->relids, PATH_REQ_OUTER(inner)) #else #define calc_nestloop_required_outer_compat(outer, inner) \ - calc_nestloop_required_outer((outer), (inner)) + calc_nestloop_required_outer((outer), (inner)) #endif @@ -120,14 +120,14 @@ * CheckValidResultRel() */ #if PG_VERSION_NUM >= 170000 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri), (cmd), NIL) +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd), NIL) #elif PG_VERSION_NUM >= 100000 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri), (cmd)) +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd)) #elif PG_VERSION_NUM >= 90500 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) #endif /* @@ -265,7 +265,7 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ (parallel_workers), false, NIL, -1, false) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 110000 @@ -277,7 +277,7 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ (parallel_workers), false, NIL, -1, false, NIL) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 100000 @@ -288,7 +288,7 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL, \ false, NIL) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 90600 @@ -299,12 +299,12 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), \ false, NIL, (parallel_workers)) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 90500 #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer)) -#endif /* PG_VERSION_NUM */ +#endif /* PG_VERSION_NUM */ /* @@ -414,8 +414,8 @@ extern void create_plain_partial_paths(PlannerInfo *root, static inline Datum 
ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) { - ExprDoneCond isdone; - Datum result = ExecEvalExpr(expr, econtext, isnull, &isdone); + ExprDoneCond isdone; + Datum result = ExecEvalExpr(expr, econtext, isnull, &isdone); if (isdone != ExprSingleResult) elog(ERROR, "expression should return single value"); @@ -432,9 +432,9 @@ ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) static inline bool ExecCheck(ExprState *state, ExprContext *econtext) { - Datum ret; - bool isnull; - MemoryContext old_mcxt; + Datum ret; + bool isnull; + MemoryContext old_mcxt; /* short-circuit (here and in ExecInitCheck) for empty restriction list */ if (state == NULL) @@ -530,7 +530,7 @@ extern List *get_all_actual_clauses(List *restrictinfo_list); * get_rel_persistence() */ #if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 -char get_rel_persistence(Oid relid); +char get_rel_persistence(Oid relid); #endif @@ -583,8 +583,8 @@ char get_rel_persistence(Oid relid); * make_restrictinfo() */ #if PG_VERSION_NUM >= 100000 -extern List * make_restrictinfos_from_actual_clauses(PlannerInfo *root, - List *clause_list); +extern List *make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list); #endif @@ -607,9 +607,9 @@ extern Result *make_result(List *tlist, * McxtStatsInternal() */ #if PG_VERSION_NUM >= 90600 -void McxtStatsInternal(MemoryContext context, int level, - bool examine_children, - MemoryContextCounters *totals); +void McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals); #endif @@ -617,7 +617,7 @@ void McxtStatsInternal(MemoryContext context, int level, * oid_cmp() */ #if PG_VERSION_NUM >=90500 && PG_VERSION_NUM < 100000 -extern int oid_cmp(const void *p1, const void *p2); +extern int oid_cmp(const void *p1, const void *p2); #endif @@ -626,7 +626,7 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ #define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ query_env) \ parse_analyze_fixedparams((RawStmt *) (parse_tree), (query_string), (param_types), \ @@ -649,7 +649,7 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ #define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ nparams, query_env) \ pg_analyze_and_rewrite_fixedparams((RawStmt *) (parsetree), (query_string), \ @@ -722,7 +722,7 @@ extern int oid_cmp(const void *p1, const void *p2); * set_dummy_rel_pathlist() */ #if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 -void set_dummy_rel_pathlist(RelOptInfo *rel); +void set_dummy_rel_pathlist(RelOptInfo *rel); #endif @@ -744,8 +744,9 @@ extern void set_rel_consider_parallel(PlannerInfo *root, * in compat version the type of first argument is (Expr *) */ #if PG_VERSION_NUM >= 100000 -#if PG_VERSION_NUM >= 140000 /* function removed in 375398244168add84a884347625d14581a421e71 */ -extern TargetEntry *tlist_member_ignore_relabel(Expr * node, List * targetlist); +#if PG_VERSION_NUM >= 140000 /* function removed in + * 375398244168add84a884347625d14581a421e71 */ +extern TargetEntry *tlist_member_ignore_relabel(Expr *node, List *targetlist); #endif #define tlist_member_ignore_relabel_compat(expr, targetlist) \ 
tlist_member_ignore_relabel((expr), (targetlist)) @@ -775,7 +776,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, tupleid, fdw_trigtuple, newslot) \ ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ (fdw_trigtuple), (newslot), NULL, NULL) -#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ #define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ tupleid, fdw_trigtuple, newslot) \ ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ @@ -826,7 +827,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecARDeleteTriggers() */ -#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ +#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ #define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ fdw_trigtuple, transition_capture) \ ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ @@ -970,9 +971,9 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * we need access to entire tuple, not just its header. */ #ifdef XID_IS_64BIT -# define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup) +#define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup) #else -# define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) +#define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) #endif /* @@ -1115,9 +1116,10 @@ static inline TupleTableSlot * ExecInitExtraTupleSlotCompatHorse(EState *s, TupleDesc t) { #if PG_VERSION_NUM >= 110000 - return ExecInitExtraTupleSlot(s,t); + return ExecInitExtraTupleSlot(s, t); #else - TupleTableSlot *res = ExecInitExtraTupleSlot(s); + TupleTableSlot *res = ExecInitExtraTupleSlot(s); + if (t) ExecSetSlotDescriptor(res, t); @@ -1149,7 +1151,7 @@ CustomEvalParamExternCompat(Param *param, return prm; } -void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); /* * lnext() @@ -1210,8 +1212,8 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) #else #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), (ipd), (od), (p), (sl), (rr), (or), (nr)) -#endif /* #if PG_VERSION_NUM >= 140000 */ -#endif /* #if PG_VERSION_NUM >= 160000 */ +#endif /* #if PG_VERSION_NUM >= 140000 */ +#endif /* #if PG_VERSION_NUM >= 160000 */ /* * pull_varnos() @@ -1243,4 +1245,4 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam) #endif -#endif /* PG_COMPAT_H */ +#endif /* PG_COMPAT_H */ From d67141658ee6f8d7e7bf11f9c74fd1c14035445e Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 3 Jun 2024 22:28:56 +0300 Subject: [PATCH 524/528] [PGPRO-10286] Added error processing for some cases Tags: pg_pathman --- src/declarative.c | 4 ++++ src/init.c | 3 ++- src/partition_creation.c | 16 ++++++++++++++-- src/pathman_workers.c | 14 ++++++++++++-- src/pl_hash_funcs.c | 7 ++++++- 5 files changed, 38 insertions(+), 6 deletions(-) diff --git a/src/declarative.c b/src/declarative.c index 367df752..42e9ffac 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -237,6 +237,8 @@ handle_attach_partition(Oid 
parent_relid, AlterTableCmd *cmd) /* Fetch pg_pathman's schema */ pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Build function's name */ proc_name = list_make2(makeString(pathman_schema), @@ -296,6 +298,8 @@ handle_detach_partition(AlterTableCmd *cmd) /* Fetch pg_pathman's schema */ pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Build function's name */ proc_name = list_make2(makeString(pathman_schema), diff --git a/src/init.c b/src/init.c index 4341d406..1907d9dc 100644 --- a/src/init.c +++ b/src/init.c @@ -273,7 +273,8 @@ static bool init_pathman_relation_oids(void) { Oid schema = get_pathman_schema(); - Assert(schema != InvalidOid); + if (schema == InvalidOid) + return false; /* extension can be dropped by another backend */ /* Cache PATHMAN_CONFIG relation's Oid */ pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, schema); diff --git a/src/partition_creation.c b/src/partition_creation.c index a0bdaa55..d6080c85 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -585,6 +585,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ Oid parent_nsp = get_rel_namespace(parent_relid); char *parent_nsp_name = get_namespace_name(parent_nsp); char *partition_name = choose_range_partition_name(parent_relid, parent_nsp); + char *pathman_schema; /* Assign the 'following' boundary to current 'leading' value */ cur_following_bound = cur_leading_bound; @@ -611,10 +612,14 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); ReleaseSysCache(typeTuple); + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Construct call to create_single_range_partition() */ create_sql = psprintf( "select %s.create_single_range_partition('%s.%s'::regclass, '%s'::%s, '%s'::%s, '%s.%s', NULL::text)", - quote_identifier(get_namespace_name(get_pathman_schema())), + quote_identifier(pathman_schema), quote_identifier(parent_nsp_name), quote_identifier(get_rel_name(parent_relid)), IsInfinite(&bounds[0]) ? 
"NULL" : datum_to_cstring(bounds[0].value, range_bound_type), @@ -1195,6 +1200,8 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Fetch pg_pathman's schema */ pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Build function's name */ copy_fkeys_proc_name = list_make2(makeString(pathman_schema), @@ -1564,6 +1571,7 @@ build_raw_hash_check_tree(Node *raw_expression, Oid hash_proc; TypeCacheEntry *tce; + char *pathman_schema; tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); hash_proc = tce->hash_proc; @@ -1596,9 +1604,13 @@ build_raw_hash_check_tree(Node *raw_expression, hash_call->over = NULL; hash_call->location = -1; + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Build schema-qualified name of function get_hash_part_idx() */ get_hash_part_idx_proc = - list_make2(makeString(get_namespace_name(get_pathman_schema())), + list_make2(makeString(pathman_schema), makeString("get_hash_part_idx")); /* Call get_hash_part_idx() */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 3eb82ab7..bf23bd94 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -520,6 +520,11 @@ bgw_main_concurrent_part(Datum main_arg) if (sql == NULL) { MemoryContext current_mcxt; + char *pathman_schema; + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* * Allocate SQL query in TopPathmanContext because current @@ -527,7 +532,7 @@ bgw_main_concurrent_part(Datum main_arg) */ current_mcxt = MemoryContextSwitchTo(TopPathmanContext); sql = psprintf("SELECT %s._partition_data_concurrent($1::regclass, NULL::text, NULL::text, p_limit:=$2)", - get_namespace_name(get_pathman_schema())); + pathman_schema); MemoryContextSwitchTo(current_mcxt); } @@ -700,6 +705,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) i; TransactionId rel_xmin; LOCKMODE lockmode = ShareUpdateExclusiveLock; + char *pathman_schema; /* Check batch_size */ if (batch_size < 1 || batch_size > 10000) @@ -800,11 +806,15 @@ partition_table_concurrently(PG_FUNCTION_ARGS) start_bgworker_errmsg(concurrent_part_bgw); } + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Tell user everything's fine */ elog(NOTICE, "worker started, you can stop it " "with the following command: select %s.%s('%s');", - get_namespace_name(get_pathman_schema()), + pathman_schema, CppAsString(stop_concurrent_part_task), get_rel_name(relid)); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index ddaaa8c0..4b08c324 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -122,6 +122,7 @@ build_hash_condition(PG_FUNCTION_ARGS) char *expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(1)); uint32 part_count = PG_GETARG_UINT32(2), part_idx = PG_GETARG_UINT32(3); + char *pathman_schema; TypeCacheEntry *tce; @@ -141,9 +142,13 @@ build_hash_condition(PG_FUNCTION_ARGS) errmsg("no hash function for type %s", format_type_be(expr_type)))); + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Create hash condition CSTRING */ result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", - get_namespace_name(get_pathman_schema()), + pathman_schema, get_func_name(tce->hash_proc), 
expr_cstr, part_count, From 92b69d85b777d6a9f9246017e77caeed532396c7 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 3 Jun 2024 23:22:14 +0300 Subject: [PATCH 525/528] Replaced deprecated python LooseVersion function --- tests/python/partitioning_test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 152b8b19..ba4b205f 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -20,7 +20,7 @@ import time import unittest -from distutils.version import LooseVersion +from packaging.version import Version from testgres import get_new_node, get_pg_version, configure_testgres # set setup base logging config, it can be turned on by `use_python_logging` @@ -58,7 +58,7 @@ } logging.config.dictConfig(LOG_CONFIG) -version = LooseVersion(get_pg_version()) +version = Version(get_pg_version()) # Helper function for json equality @@ -448,7 +448,7 @@ def test_parallel_nodes(self): # Check version of postgres server # If version < 9.6 skip all tests for parallel queries - if version < LooseVersion('9.6.0'): + if version < Version('9.6.0'): return # Prepare test database @@ -485,7 +485,7 @@ def test_parallel_nodes(self): # Test parallel select with node.connect() as con: con.execute('set max_parallel_workers_per_gather = 2') - if version >= LooseVersion('10'): + if version >= Version('10'): con.execute('set min_parallel_table_scan_size = 0') else: con.execute('set min_parallel_relation_size = 0') @@ -1045,7 +1045,7 @@ def test_update_node_plan1(self): self.assertEqual(len(plan["Target Tables"]), 11) # Plan was seriously changed in vanilla since v14 - if version < LooseVersion('14'): + if version < Version('14'): expected_format = ''' { "Plans": [ From afbec7faa3b8c86a6ad4ab386a7abe6b43027d9c Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Tue, 18 Jun 2024 11:48:15 +0300 Subject: [PATCH 526/528] Update pg_pathman due to vanilla PostgreSQL. 1. Fix regression output due to fd0398fcb099. Changed tests: pathman_only and pathman_rowmarks. 2. Fix code due to commit d20d8fbd3e4d. 3. Fix comments in test files due to alternate outputs. --- expected/pathman_only.out | 26 +- expected/pathman_only_1.out | 26 +- expected/pathman_only_2.out | 26 +- expected/pathman_only_3.out | 26 +- expected/pathman_only_4.out | 299 +++++++++++++++++++++++ expected/pathman_rowmarks.out | 27 ++- expected/pathman_rowmarks_1.out | 27 ++- expected/pathman_rowmarks_2.out | 27 ++- expected/pathman_rowmarks_3.out | 27 ++- expected/pathman_rowmarks_4.out | 407 ++++++++++++++++++++++++++++++++ sql/pathman_only.sql | 26 +- sql/pathman_rowmarks.sql | 27 ++- src/pl_funcs.c | 11 + src/relation_info.c | 3 +- 14 files changed, 939 insertions(+), 46 deletions(-) create mode 100644 expected/pathman_only_4.out create mode 100644 expected/pathman_rowmarks_4.out diff --git a/expected/pathman_only.out b/expected/pathman_only.out index 1b9f6a6b..f44f2256 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. 
There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out index b92a8eaf..ce6fd127 100644 --- a/expected/pathman_only_1.out +++ b/expected/pathman_only_1.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out index c37dd5f4..6aeadb76 100644 --- a/expected/pathman_only_2.out +++ b/expected/pathman_only_2.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. 
+ * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_3.out b/expected/pathman_only_3.out index 2f2fcc75..1999309d 100644 --- a/expected/pathman_only_3.out +++ b/expected/pathman_only_3.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_4.out b/expected/pathman_only_4.out new file mode 100644 index 00000000..fbcc397c --- /dev/null +++ b/expected/pathman_only_4.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. 
+ * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY 
test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq 
Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = (InitPlan 1).col1) + InitPlan 1 + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = (InitPlan 1).col1) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index ea047c9e..6d4611ee 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -1,13 +1,30 @@ /* * ------------------------------------------- - * NOTE: This test behaves differenly on 9.5 + * NOTE: This test behaves differenly on PgPro * ------------------------------------------- * - * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, - * causing different output; pathman_rowmarks_2.out is the updated version. + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. 
  *
- * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed,
- * now it includes aliases for inherited tables.
+ * ------------------------
+ * pathman_rowmarks_2.sql
+ * ------------------------
+ * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are
+ * eliminated, causing different output.
+ *
+ * ------------------------
+ * pathman_rowmarks_3.sql
+ * ------------------------
+ * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was
+ * changed, now it includes aliases for inherited tables.
+ *
+ * ------------------------
+ * pathman_rowmarks_4.sql
+ * ------------------------
+ * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was
+ * changed, now it displays SubPlan nodes and output parameters.
  */
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out
index 256b8637..063fca8d 100644
--- a/expected/pathman_rowmarks_1.out
+++ b/expected/pathman_rowmarks_1.out
@@ -1,13 +1,30 @@
 /*
  * -------------------------------------------
- * NOTE: This test behaves differenly on 9.5
+ * NOTE: This test behaves differenly on PgPro
  * -------------------------------------------
  *
- * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
- * causing different output; pathman_rowmarks_2.out is the updated version.
+ * ------------------------
+ * pathman_rowmarks_1.sql
+ * ------------------------
+ * Since PostgreSQL 9.5, output of EXPLAIN was changed.
  *
- * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed,
- * now it includes aliases for inherited tables.
+ * ------------------------
+ * pathman_rowmarks_2.sql
+ * ------------------------
+ * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are
+ * eliminated, causing different output.
+ *
+ * ------------------------
+ * pathman_rowmarks_3.sql
+ * ------------------------
+ * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was
+ * changed, now it includes aliases for inherited tables.
+ *
+ * ------------------------
+ * pathman_rowmarks_4.sql
+ * ------------------------
+ * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was
+ * changed, now it displays SubPlan nodes and output parameters.
  */
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out
index 06fb88ac..91d7804e 100644
--- a/expected/pathman_rowmarks_2.out
+++ b/expected/pathman_rowmarks_2.out
@@ -1,13 +1,30 @@
 /*
  * -------------------------------------------
- * NOTE: This test behaves differenly on 9.5
+ * NOTE: This test behaves differenly on PgPro
  * -------------------------------------------
  *
- * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
- * causing different output; pathman_rowmarks_2.out is the updated version.
+ * ------------------------
+ * pathman_rowmarks_1.sql
+ * ------------------------
+ * Since PostgreSQL 9.5, output of EXPLAIN was changed.
  *
- * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed,
- * now it includes aliases for inherited tables.
+ * ------------------------
+ * pathman_rowmarks_2.sql
+ * ------------------------
+ * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are
+ * eliminated, causing different output.
+ *
+ * ------------------------
+ * pathman_rowmarks_3.sql
+ * ------------------------
+ * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was
+ * changed, now it includes aliases for inherited tables.
+ *
+ * ------------------------
+ * pathman_rowmarks_4.sql
+ * ------------------------
+ * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was
+ * changed, now it displays SubPlan nodes and output parameters.
  */
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out
index af61e5f7..e8644292 100644
--- a/expected/pathman_rowmarks_3.out
+++ b/expected/pathman_rowmarks_3.out
@@ -1,13 +1,30 @@
 /*
  * -------------------------------------------
- * NOTE: This test behaves differenly on 9.5
+ * NOTE: This test behaves differenly on PgPro
  * -------------------------------------------
  *
- * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
- * causing different output; pathman_rowmarks_2.out is the updated version.
+ * ------------------------
+ * pathman_rowmarks_1.sql
+ * ------------------------
+ * Since PostgreSQL 9.5, output of EXPLAIN was changed.
  *
- * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed,
- * now it includes aliases for inherited tables.
+ * ------------------------
+ * pathman_rowmarks_2.sql
+ * ------------------------
+ * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are
+ * eliminated, causing different output.
+ *
+ * ------------------------
+ * pathman_rowmarks_3.sql
+ * ------------------------
+ * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was
+ * changed, now it includes aliases for inherited tables.
+ *
+ * ------------------------
+ * pathman_rowmarks_4.sql
+ * ------------------------
+ * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was
+ * changed, now it displays SubPlan nodes and output parameters.
 */
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
diff --git a/expected/pathman_rowmarks_4.out b/expected/pathman_rowmarks_4.out
new file mode 100644
index 00000000..5fbec84d
--- /dev/null
+++ b/expected/pathman_rowmarks_4.out
@@ -0,0 +1,407 @@
+/*
+ * -------------------------------------------
+ * NOTE: This test behaves differenly on PgPro
+ * -------------------------------------------
+ *
+ * ------------------------
+ * pathman_rowmarks_1.sql
+ * ------------------------
+ * Since PostgreSQL 9.5, output of EXPLAIN was changed.
+ *
+ * ------------------------
+ * pathman_rowmarks_2.sql
+ * ------------------------
+ * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are
+ * eliminated, causing different output.
+ *
+ * ------------------------
+ * pathman_rowmarks_3.sql
+ * ------------------------
+ * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was
+ * changed, now it includes aliases for inherited tables.
+ *
+ * ------------------------
+ * pathman_rowmarks_4.sql
+ * ------------------------
+ * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was
+ * changed, now it displays SubPlan nodes and output parameters.
+ */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +----------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? +---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +------------------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: first_1.id + -> Append + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +-------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR 
UPDATE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + 
Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index 88f4e88a..68dc4ca1 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
 */
\set VERBOSITY terse
diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql
index bb7719ea..8847b80c 100644
--- a/sql/pathman_rowmarks.sql
+++ b/sql/pathman_rowmarks.sql
@@ -1,13 +1,30 @@
 /*
  * -------------------------------------------
- * NOTE: This test behaves differenly on 9.5
+ * NOTE: This test behaves differenly on PgPro
  * -------------------------------------------
  *
- * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated,
- * causing different output; pathman_rowmarks_2.out is the updated version.
+ * ------------------------
+ * pathman_rowmarks_1.sql
+ * ------------------------
+ * Since PostgreSQL 9.5, output of EXPLAIN was changed.
  *
- * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed,
- * now it includes aliases for inherited tables.
+ * ------------------------
+ * pathman_rowmarks_2.sql
+ * ------------------------
+ * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are
+ * eliminated, causing different output.
+ *
+ * ------------------------
+ * pathman_rowmarks_3.sql
+ * ------------------------
+ * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was
+ * changed, now it includes aliases for inherited tables.
+ *
+ * ------------------------
+ * pathman_rowmarks_4.sql
+ * ------------------------
+ * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was
+ * changed, now it displays SubPlan nodes and output parameters.
 */
 SET search_path = 'public';
 CREATE EXTENSION pg_pathman;
diff --git a/src/pl_funcs.c b/src/pl_funcs.c
index 10538bea..75c1c12a 100644
--- a/src/pl_funcs.c
+++ b/src/pl_funcs.c
@@ -174,7 +174,12 @@ get_partition_cooked_key_pl(PG_FUNCTION_ARGS)
 	expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]);
 	expr = cook_partitioning_expression(relid, expr_cstr, NULL);
+
+#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */
+	cooked_cstr = nodeToStringWithLocations(expr);
+#else
 	cooked_cstr = nodeToString(expr);
+#endif
 
 	pfree(expr_cstr);
 	pfree(expr);
@@ -196,7 +201,13 @@ get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS)
 	prel = get_pathman_relation_info(relid);
 	shout_if_prel_is_invalid(relid, prel, PT_ANY);
+
+#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */
+	res = CStringGetTextDatum(nodeToStringWithLocations(prel->expr));
+#else
 	res = CStringGetTextDatum(nodeToString(prel->expr));
+#endif
+
 	close_pathman_relation_info(prel);
 
 	PG_RETURN_DATUM(res);
diff --git a/src/relation_info.c b/src/relation_info.c
index db75646f..2794a183 100644
--- a/src/relation_info.c
+++ b/src/relation_info.c
@@ -1491,7 +1491,8 @@ parse_partitioning_expression(const Oid relid,
 	return ((ResTarget *) linitial(select_stmt->targetList))->val;
 }
 
-/* Parse partitioning expression and return its type and nodeToString() as TEXT */
+/* Parse partitioning expression and return its type and nodeToString()
+ * (or nodeToStringWithLocations() in version 17 and higher) as TEXT */
 Node *
 cook_partitioning_expression(const Oid relid,
 							 const char *expr_cstr,

From a0025f4130261a200d2165db1809c022d570d05c Mon Sep 17 00:00:00 2001
From: Marina Polyakova
Date: Tue, 6 Aug 2024 10:24:00 +0300
Subject: [PATCH 527/528] PGPRO-10100: Revert "PGPRO-9797: Temporary disable
 test pathman_upd_del.sql"

This reverts commit 5376dfba1b459de1935982964b2ba94a03fdcd6b.
--- Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Makefile b/Makefile index f32398da..f6780044 100644 --- a/Makefile +++ b/Makefile @@ -64,8 +64,6 @@ REGRESS = pathman_array_qual \ pathman_utility_stmt \ pathman_views \ pathman_CVE-2020-14350 - -REGRESS := $(filter-out pathman_upd_del, $(REGRESS)) endif ISOLATION = insert_nodes for_update rollback_on_create_partitions From 810d906815269135838a924942195d9470d3f2e6 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Fri, 1 Nov 2024 19:51:47 +0300 Subject: [PATCH 528/528] PGPRO-10245: fix pathman_upd_del test --- expected/pathman_upd_del.out | 3 + expected/pathman_upd_del_1.out | 3 + expected/pathman_upd_del_2.out | 3 + expected/pathman_upd_del_3.out | 3 + expected/pathman_upd_del_4.out | 464 +++++++++++++++++++++++++++++++++ sql/pathman_upd_del.sql | 3 + 6 files changed, 479 insertions(+) create mode 100644 expected/pathman_upd_del_4.out diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 44bb34fc..752cff27 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index 0a7e91e9..6e0f312d 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out index 80325d7e..0826594c 100644 --- a/expected/pathman_upd_del_2.out +++ b/expected/pathman_upd_del_2.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_3.out b/expected/pathman_upd_del_3.out index 70b41e7d..d11eb6f8 100644 --- a/expected/pathman_upd_del_3.out +++ b/expected/pathman_upd_del_3.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_4.out b/expected/pathman_upd_del_4.out new file mode 100644 index 00000000..54330190 --- /dev/null +++ b/expected/pathman_upd_del_4.out @@ -0,0 +1,464 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time 
zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, 
partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +------------------------------------------ + Update on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t2.id) + -> Seq Scan on tmp2_1 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_3 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_4 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_5 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_6 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_7 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_8 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_9 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_10 t2 + Filter: (t.id = id) +(25 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE 
id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index a034c14a..c99b9666 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. */ \set VERBOSITY terse
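
The two #if PG_VERSION_NUM >= 170000 hunks added to src/pl_funcs.c earlier in
this series repeat the same version check at both call sites. Below is a
minimal sketch of how the two sites could share a single compatibility macro;
the name nodeToStringCompat and its placement are illustrative assumptions,
not part of these patches. PostgreSQL 17 (commit d20d8fbd3e4d, the one
referenced in those hunks) made nodeToString() omit token locations and added
nodeToStringWithLocations() for callers that still need them, hence the gate
on 170000.

/*
 * nodeToStringCompat: hypothetical helper, not part of the patches above.
 * Serializes a node tree while preserving parse locations on every
 * supported PostgreSQL version.
 */
#include "postgres.h"
#include "nodes/nodes.h"	/* nodeToString(), nodeToStringWithLocations() */

#if PG_VERSION_NUM >= 170000	/* for commit d20d8fbd3e4d */
#define nodeToStringCompat(obj)		nodeToStringWithLocations(obj)
#else
#define nodeToStringCompat(obj)		nodeToString(obj)
#endif

With such a helper, each patched call site would collapse to one
unconditional line, e.g. cooked_cstr = nodeToStringCompat(expr);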