From: Michael P. <mic...@us...> - 2011-04-11 00:52:04

Project "Postgres-XC". The branch, master has been updated
      via  b53f7e7bdc5cc827e332f655c73ac7334e9a162d (commit)
     from  e137a355b191645ddaf921b5cee2264252a76e8d (commit)

- Log -----------------------------------------------------------------
commit b53f7e7bdc5cc827e332f655c73ac7334e9a162d
Author: Michael P <mic...@us...>
Date:   Mon Apr 11 09:46:09 2011 +0900

    Delete pgxc_class tuple when dropping a relation

    Clean-up of pgxc_class, the catalog table containing all the
    distribution information of cluster tables, was not done when
    dropping a table.

    Patch written by Benny Wang

diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index abefa48..48eaeac 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -1275,6 +1275,11 @@ doDeletion(const ObjectAddress *object)
 		case OCLASS_DEFACL:
 			RemoveDefaultACLById(object->objectId);
 			break;
+#ifdef PGXC
+		case OCLASS_PGXC_CLASS:
+			RemovePgxcClass(object->objectId);
+			break;
+#endif

 		default:
 			elog(ERROR, "unrecognized object class: %u",
@@ -2210,6 +2215,11 @@ getObjectClass(const ObjectAddress *object)
 		case DefaultAclRelationId:
 			Assert(object->objectSubId == 0);
 			return OCLASS_DEFACL;
+#ifdef PGXC
+		case PgxcClassRelationId:
+			Assert(object->objectSubId == 0);
+			return OCLASS_PGXC_CLASS;
+#endif
 	}

 	/* shouldn't get here */
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 8b25ace..34f8dba 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -830,6 +830,8 @@ AddRelationDistribution (Oid relid,
 	int			hashalgorithm = 0;
 	int			hashbuckets = 0;
 	AttrNumber	attnum = 0;
+	ObjectAddress myself,
+				referenced;

 	if (!distributeby)
@@ -982,6 +984,17 @@ AddRelationDistribution (Oid relid,
 	}

 	PgxcClassCreate (relid, locatortype, attnum, hashalgorithm, hashbuckets);
+
+	/* Make dependency entries */
+	myself.classId = PgxcClassRelationId;
+	myself.objectId = relid;
+	myself.objectSubId = 0;
+
+	/* Dependency on relation */
+	referenced.classId = RelationRelationId;
+	referenced.objectId = relid;
+	referenced.objectSubId = 0;
+	recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
 }
 #endif
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 9050357..6a538fc 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -506,7 +506,7 @@ static const struct cachedesc cacheinfo[] = {
 		PgxcClassPgxcRelIdIndexId,
 		1,
 		{
-			ObjectIdAttributeNumber,
+			Anum_pgxc_class_pcrelid,
 			0,
 			0,
 			0
-----------------------------------------------------------------------

Summary of changes:
 src/backend/catalog/dependency.c   | 10 ++++++++++
 src/backend/catalog/heap.c         | 13 +++++++++++++
 src/backend/utils/cache/syscache.c |  2 +-
 3 files changed, 24 insertions(+), 1 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-04-11 00:05:55

Project "Postgres-XC". The branch, master has been updated
      via  e137a355b191645ddaf921b5cee2264252a76e8d (commit)
     from  43675e5f56beb98419514bd23997bc178e005a6f (commit)

- Log -----------------------------------------------------------------
commit e137a355b191645ddaf921b5cee2264252a76e8d
Author: Michael P <mic...@us...>
Date:   Mon Apr 11 08:52:22 2011 +0900

    Fix for bug 3237820: rules on views

    Views are created only on Coordinators, so a query that defines a rule
    on a view is launched only on Coordinators. A query that drops a rule
    defined on a view is likewise launched only on Coordinators.

diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 8408b40..28a43c2 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -1240,8 +1240,19 @@ standard_ProcessUtility(Node *parsetree,
 		case T_RuleStmt:	/* CREATE RULE */
 			DefineRule((RuleStmt *) parsetree, queryString);
 #ifdef PGXC
-			if (IS_PGXC_COORDINATOR)
-				ExecUtilityStmtOnNodes(queryString, NULL, false, EXEC_ON_ALL_NODES);
+			/* If a rule is created on a view, define it only on Coordinator */
+			if (IS_PGXC_COORDINATOR && !IsConnFromCoord())
+			{
+				RemoteQueryExecType remoteExecType;
+				Oid relid = RangeVarGetRelid(((RuleStmt *) parsetree)->relation, false);
+
+				if (get_rel_relkind(relid) == RELKIND_VIEW)
+					remoteExecType = EXEC_ON_COORDS;
+				else
+					remoteExecType = EXEC_ON_ALL_NODES;
+
+				ExecUtilityStmtOnNodes(queryString, NULL, false, remoteExecType);
+			}
 #endif
 			break;
@@ -1472,11 +1483,30 @@ standard_ProcessUtility(Node *parsetree,
 					/* RemoveRewriteRule checks permissions */
 					RemoveRewriteRule(relId, stmt->property,
 									  stmt->behavior, stmt->missing_ok);
+#ifdef PGXC
+					/* If rule is defined on a view, drop it only on Coordinators */
+					if (IS_PGXC_COORDINATOR && !IsConnFromCoord())
+					{
+						RemoteQueryExecType remoteExecType;
+						Oid relid = RangeVarGetRelid(stmt->relation, false);
+
+						if (get_rel_relkind(relid) == RELKIND_VIEW)
+							remoteExecType = EXEC_ON_COORDS;
+						else
+							remoteExecType = EXEC_ON_ALL_NODES;
+
+						ExecUtilityStmtOnNodes(queryString, NULL, false, remoteExecType);
+					}
+#endif
 					break;
 				case OBJECT_TRIGGER:
 					/* DropTrigger checks permissions */
 					DropTrigger(relId, stmt->property,
 								stmt->behavior, stmt->missing_ok);
+#ifdef PGXC
+					if (IS_PGXC_COORDINATOR)
+						ExecUtilityStmtOnNodes(queryString, NULL, false, EXEC_ON_ALL_NODES);
+#endif
 					break;
 				default:
 					elog(ERROR, "unrecognized object type: %d",
@@ -1484,10 +1514,6 @@ standard_ProcessUtility(Node *parsetree,
 					break;
 			}
 		}
-#ifdef PGXC
-		if (IS_PGXC_COORDINATOR)
-			ExecUtilityStmtOnNodes(queryString, NULL, false, EXEC_ON_ALL_NODES);
-#endif
 		break;

 	case T_CreatePLangStmt:
-----------------------------------------------------------------------

Summary of changes:
 src/backend/tcop/utility.c | 38 ++++++++++++++++++++++++++++++------
 1 files changed, 32 insertions(+), 6 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Abbas B. <ga...@us...> - 2011-04-08 15:46:17

Project "Postgres-XC". The branch, master has been updated
      via  43675e5f56beb98419514bd23997bc178e005a6f (commit)
     from  48986620f70d0220524e715061b76824ac9684c6 (commit)

- Log -----------------------------------------------------------------
commit 43675e5f56beb98419514bd23997bc178e005a6f
Author: Abbas <abb...@en...>
Date:   Fri Apr 8 20:34:58 2011 +0500

    This patch fixes bug ID 3280980, in which ORDER BY BIT fails.

    The reason for the failure was in the function create_tuple_desc.
    The function do_query sends the query to the appropriate nodes and
    then reads the results. It then calls handle_response to process
    the messages received from the nodes. On receiving the RowDescription
    message, that function calls HandleRowDescription to create a tuple
    descriptor from the row description message. In create_tuple_desc the
    type modifier was being ignored as received in the row description;
    instead, the function was asking parseTypeString to decode the type
    modifier from the type name. Although the BIT array was defined to be
    11 bits long, the function would report that the type modifier is 1.
    This was causing the query to fail.

    This patch changes the code so that it no longer ignores the type
    modifier received in the row description message.

diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c
index 848a3cc..700268a 100644
--- a/src/backend/pgxc/pool/execRemote.c
+++ b/src/backend/pgxc/pool/execRemote.c
@@ -410,7 +410,7 @@ create_tuple_desc(char *msg_body, size_t len)
 		char	   *attname;
 		char	   *typname;
 		Oid			oidtypeid;
-		int32		typmod;
+		int32		typemode, typmod;
 		uint32		n32;

 		attnum = (AttrNumber) i;
@@ -435,14 +435,16 @@ create_tuple_desc(char *msg_body, size_t len)
 		/* type len, ignored */
 		msg_body += 2;

-		/* type mod, ignored */
+		/* type mod */
+		memcpy(&typemode, msg_body, 4);
+		typmod = ntohl(typemode);
 		msg_body += 4;

 		/* PGXCTODO text/binary flag? */
 		msg_body += 2;

 		/* Get the OID type and mode type from typename */
-		parseTypeString(typname, &oidtypeid, &typmod);
+		parseTypeString(typname, &oidtypeid, NULL);

 		TupleDescInitEntry(result, attnum, attname, oidtypeid, typmod, 0);
 	}
-----------------------------------------------------------------------

Summary of changes:
 src/backend/pgxc/pool/execRemote.c | 8 +++++---
 1 files changed, 5 insertions(+), 3 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-04-08 09:02:29

Project "Postgres-XC". The branch, master has been updated
      via  48986620f70d0220524e715061b76824ac9684c6 (commit)
     from  9a329381083f831459db12cdd6616552c4571c5b (commit)

- Log -----------------------------------------------------------------
commit 48986620f70d0220524e715061b76824ac9684c6
Author: Michael P <mic...@us...>
Date:   Fri Apr 8 17:57:02 2011 +0900

    Fix for regression test create_index

    XC does not yet support concurrent index builds, TEMP tables, or the
    INTO clause, so this output is correct. Query plans now appear as a
    Data Node Scan.

diff --git a/src/test/regress/expected/create_index_1.out b/src/test/regress/expected/create_index_1.out
new file mode 100644
index 0000000..0c5b67d
--- /dev/null
+++ b/src/test/regress/expected/create_index_1.out
@@ -0,0 +1,963 @@
+--
+-- CREATE_INDEX
+-- Create ancillary data structures (i.e. indices)
+--
+--
+-- BTREE
+--
+CREATE INDEX onek_unique1 ON onek USING btree(unique1 int4_ops);
+CREATE INDEX onek_unique2 ON onek USING btree(unique2 int4_ops);
+CREATE INDEX onek_hundred ON onek USING btree(hundred int4_ops);
+CREATE INDEX onek_stringu1 ON onek USING btree(stringu1 name_ops);
+CREATE INDEX tenk1_unique1 ON tenk1 USING btree(unique1 int4_ops);
+CREATE INDEX tenk1_unique2 ON tenk1 USING btree(unique2 int4_ops);
+CREATE INDEX tenk1_hundred ON tenk1 USING btree(hundred int4_ops);
+CREATE INDEX tenk1_thous_tenthous ON tenk1 (thousand, tenthous);
+CREATE INDEX tenk2_unique1 ON tenk2 USING btree(unique1 int4_ops);
+CREATE INDEX tenk2_unique2 ON tenk2 USING btree(unique2 int4_ops);
+CREATE INDEX tenk2_hundred ON tenk2 USING btree(hundred int4_ops);
+CREATE INDEX rix ON road USING btree (name text_ops);
+CREATE INDEX iix ON ihighway USING btree (name text_ops);
+CREATE INDEX six ON shighway USING btree (name text_ops);
+-- test comments
+COMMENT ON INDEX six_wrong IS 'bad index';
+ERROR: relation "six_wrong" does not exist
+COMMENT ON INDEX six IS 'good index';
+COMMENT ON INDEX six IS NULL;
+--
+-- BTREE ascending/descending cases
+--
+-- we load int4/text from pure descending data (each key is a new
+-- low key) and name/f8 from pure ascending data (each key is a new
+-- high key). we had a bug where new low keys would sometimes be
+-- "lost".
+-- +CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops); +CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops); +CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops); +CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops); +-- +-- BTREE partial indices +-- +CREATE INDEX onek2_u1_prtl ON onek2 USING btree(unique1 int4_ops) + where unique1 < 20 or unique1 > 980; +ERROR: relation "onek2" does not exist +CREATE INDEX onek2_u2_prtl ON onek2 USING btree(unique2 int4_ops) + where stringu1 < 'B'; +ERROR: relation "onek2" does not exist +CREATE INDEX onek2_stu1_prtl ON onek2 USING btree(stringu1 name_ops) + where onek2.stringu1 >= 'J' and onek2.stringu1 < 'K'; +ERROR: relation "onek2" does not exist +-- +-- GiST (rtree-equivalent opclasses only) +-- +CREATE INDEX grect2ind ON fast_emp4000 USING gist (home_base); +CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1); +CREATE INDEX gcircleind ON circle_tbl USING gist (f1); +CREATE INDEX gpointind ON point_tbl USING gist (f1); +CREATE TEMP TABLE gpolygon_tbl AS + SELECT polygon(home_base) AS f1 FROM slow_emp4000; +ERROR: INTO clause not yet supported +INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); +ERROR: relation "gpolygon_tbl" does not exist +LINE 1: INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); + ^ +INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); +ERROR: relation "gpolygon_tbl" does not exist +LINE 1: INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); + ^ +CREATE TEMP TABLE gcircle_tbl AS + SELECT circle(home_base) AS f1 FROM slow_emp4000; +ERROR: INTO clause not yet supported +CREATE INDEX ggpolygonind ON gpolygon_tbl USING gist (f1); +ERROR: relation "gpolygon_tbl" does not exist +CREATE INDEX ggcircleind ON gcircle_tbl USING gist (f1); +ERROR: relation "gcircle_tbl" does not exist +SET enable_seqscan = ON; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SELECT * FROM fast_emp4000 + WHERE home_base @ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + home_base +----------- +(0 rows) + +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + count +------- + 1 +(1 row) + +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + count +------- + 138 +(1 row) + +SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon + ORDER BY (poly_center(f1))[0]; + id | f1 +----+--------------------- + 1 | ((2,0),(2,4),(0,0)) +(1 row) + +SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) + ORDER BY area(f1); + f1 +--------------- + <(1,2),3> + <(1,3),5> + <(1,2),100> + <(100,1),115> +(4 rows) + +SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon; +ERROR: relation "gpolygon_tbl" does not exist +LINE 1: SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,... + ^ +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; +ERROR: relation "gcircle_tbl" does not exist +LINE 1: SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500... 
+ ^ +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; + count +------- + 1 +(1 row) + +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM fast_emp4000 + WHERE home_base @ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT * FROM fast_emp4000 + WHERE home_base @ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + home_base +----------- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + count +------- + 138 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon + ORDER BY (poly_center(f1))[0]; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon + ORDER BY (poly_center(f1))[0]; + id | f1 +----+--------------------- + 1 | ((2,0),(2,4),(0,0)) +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) + ORDER BY area(f1); + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) + ORDER BY area(f1); + f1 +--------------- + <(1,2),3> + <(1,3),5> + <(1,2),100> + <(100,1),115> +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon; +ERROR: relation "gpolygon_tbl" does not exist +LINE 2: SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,... + ^ +SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon; +ERROR: relation "gpolygon_tbl" does not exist +LINE 1: SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,... + ^ +EXPLAIN (COSTS OFF) +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; +ERROR: relation "gcircle_tbl" does not exist +LINE 2: SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500... + ^ +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; +ERROR: relation "gcircle_tbl" does not exist +LINE 1: SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500... 
+ ^ +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; + QUERY PLAN +---------------- + Data Node Scan +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; + count +------- + 1 +(1 row) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- +-- GIN over int[] and text[] +-- +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +CREATE INDEX intarrayidx ON array_index_op_test USING gin (i); +SELECT * FROM array_index_op_test WHERE i @> '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM 
array_index_op_test WHERE i && '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_index_op_test WHERE i && '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 77 | 
{97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(3 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(11 rows) + +SELECT * FROM array_index_op_test WHERE i <@ '{38,34,32,89}' ORDER BY seqno; + seqno | i | t +-------+---------------+---------------------------------------------------------------------------------------------------------------------------- + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} +(3 rows) + +SELECT * FROM array_index_op_test WHERE i = '{47,77}' ORDER BY seqno; + seqno | i | t +-------+---------+----------------------------------------------------------------------------------------------------------------- + 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483} +(1 row) + +CREATE INDEX textarrayidx ON array_index_op_test USING gin (t); +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAA72908}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(4 
rows) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAAA72908}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(4 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------------------+-------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(3 rows) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------------------+-------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(3 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------+-------------------------------------------------------------------- + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(1 row) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(6 rows) + +SELECT * FROM array_index_op_test WHERE t <@ '{AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}' ORDER BY seqno; + seqno | i | t +-------+--------------------+----------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} +(2 rows) + +SELECT * FROM array_index_op_test WHERE t = '{AAAAAAAAAA646,A87088}' ORDER BY seqno; + seqno | i | t +-------+------------+------------------------ + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(1 row) + +-- Repeat some of the above tests but exercising bitmapscans instead +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; +SELECT * FROM array_index_op_test WHERE i @> '{32}' ORDER BY seqno; + seqno | i | t 
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_index_op_test WHERE i && '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | 
{AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(3 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(11 rows) + +SELECT * FROM array_index_op_test WHERE i <@ '{38,34,32,89}' ORDER BY seqno; + seqno | i | t +-------+---------------+---------------------------------------------------------------------------------------------------------------------------- + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} +(3 rows) + +SELECT * FROM array_index_op_test WHERE i = '{47,77}' ORDER BY seqno; + seqno | i | t 
+-------+---------+----------------------------------------------------------------------------------------------------------------- + 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483} +(1 row) + +-- And try it with a multicolumn GIN index +DROP INDEX intarrayidx, textarrayidx; +CREATE INDEX botharrayidx ON array_index_op_test USING gin (i, t); +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SELECT * FROM array_index_op_test WHERE i @> '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------- + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 97 | {54,2,86,65} | 
{47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(7 rows) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------- + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(7 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{32}' AND t && '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+-----------------------------+------------------------------------------------------------------------------ + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(1 row) + +SELECT * FROM array_index_op_test WHERE i && '{32}' AND t @> '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+-----------------------------+------------------------------------------------------------------------------ + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(1 row) + +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; +SELECT * FROM array_index_op_test WHERE i @> '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32}' ORDER BY seqno; + seqno | i | t 
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------- + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(7 rows) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------- + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(7 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{32}' AND t && '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t 
+-------+-----------------------------+------------------------------------------------------------------------------ + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(1 row) + +SELECT * FROM array_index_op_test WHERE i && '{32}' AND t @> '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+-----------------------------+------------------------------------------------------------------------------ + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(1 row) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- +-- HASH +-- +CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops); +CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops); +CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops); +CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops); +-- CREATE INDEX hash_ovfl_index ON hash_ovfl_heap USING hash (x int4_ops); +-- +-- Test functional index +-- +CREATE TABLE func_index_heap (f1 text, f2 text); +CREATE UNIQUE INDEX func_index_index on func_index_heap (textcat(f1,f2)); +ERROR: Cannot locally enforce a unique index on round robin distributed table. +INSERT INTO func_index_heap VALUES('ABC','DEF'); +INSERT INTO func_index_heap VALUES('AB','CDEFG'); +INSERT INTO func_index_heap VALUES('QWE','RTY'); +-- this should fail because of unique index: +INSERT INTO func_index_heap VALUES('ABCD', 'EF'); +-- but this shouldn't: +INSERT INTO func_index_heap VALUES('QWERTY'); +-- +-- Same test, expressional index +-- +DROP TABLE func_index_heap; +CREATE TABLE func_index_heap (f1 text, f2 text); +CREATE UNIQUE INDEX func_index_index on func_index_heap ((f1 || f2) text_ops); +ERROR: Cannot locally enforce a unique index on round robin distributed table. +INSERT INTO func_index_heap VALUES('ABC','DEF'); +INSERT INTO func_index_heap VALUES('AB','CDEFG'); +INSERT INTO func_index_heap VALUES('QWE','RTY'); +-- this should fail because of unique index: +INSERT INTO func_index_heap VALUES('ABCD', 'EF'); +-- but this shouldn't: +INSERT INTO func_index_heap VALUES('QWERTY'); +-- +-- Also try building functional, expressional, and partial indexes on +-- tables that already contain data. +-- +create unique index hash_f8_index_1 on hash_f8_heap(abs(random)); +ERROR: Unique index of partitioned table must contain the hash/modulo distribution column. +create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random); +ERROR: Unique index of partitioned table must contain the hash/modulo distribution column. +create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000; +ERROR: Unique index of partitioned table must contain the hash/modulo distribution column. +-- +-- Try some concurrent index builds +-- +-- Unfortunately this only tests about half the code paths because there are +-- no concurrent updates happening to the table at the same time. 
+CREATE TABLE concur_heap (f1 text, f2 text); +-- empty table +CREATE INDEX CONCURRENTLY concur_index1 ON concur_heap(f2,f1); +ERROR: PGXC does not support concurrent INDEX yet +DETAIL: The feature is not currently supported +INSERT INTO concur_heap VALUES ('a','b'); +INSERT INTO concur_heap VALUES ('b','b'); +-- unique index +CREATE UNIQUE INDEX CONCURRENTLY concur_index2 ON concur_heap(f1); +ERROR: PGXC does not support concurrent INDEX yet +DETAIL: The feature is not currently supported +-- check if constraint is set up properly to be enforced +INSERT INTO concur_heap VALUES ('b','x'); +-- check if constraint is enforced properly at build time +CREATE UNIQUE INDEX CONCURRENTLY concur_index3 ON concur_heap(f2); +ERROR: PGXC does not support concurrent INDEX yet +DETAIL: The feature is not currently supported +-- test that expression indexes and partial indexes work concurrently +CREATE INDEX CONCURRENTLY concur_index4 on concur_heap(f2) WHERE f1='a'; +ERROR: PGXC does not support concurrent INDEX yet +DETAIL: The feature is not currently supported +CREATE INDEX CONCURRENTLY concur_index5 on concur_heap(f2) WHERE f1='x'; +ERROR: PGXC does not support concurrent INDEX yet +DETAIL: The feature is not currently supported +-- here we also check that you can default the index name +CREATE INDEX CONCURRENTLY on concur_heap((f2||f1)); +ERROR: PGXC does not support concurrent INDEX yet +DETAIL: The feature is not currently supported +-- You can't do a concurrent index build in a transaction +BEGIN; +CREATE INDEX CONCURRENTLY concur_index7 ON concur_heap(f1); +ERROR: PGXC does not support concurrent INDEX yet +DETAIL: The feature is not currently supported +COMMIT; +-- But you can do a regular index build in a transaction +BEGIN; +CREATE INDEX std_index on concur_heap(f2); +COMMIT; +-- check to make sure that the failed indexes were cleaned up properly and the +-- successful indexes are created properly. Notably that they do NOT have the +-- "invalid" flag set. +\d concur_heap +Table "public.concur_heap" + Column | Type | Modifiers +--------+------+----------- + f1 | text | + f2 | text | +Indexes: + "std_index" btree (f2) + +DROP TABLE concur_heap; +-- +-- Tests for IS NULL/IS NOT NULL with b-tree indexes +-- +SELECT unique1, unique2 INTO onek_with_null FROM onek; +ERROR: INTO clause not yet supported +INSERT INTO onek_with_null (unique1,unique2) VALUES (NULL, -1), (NULL, NULL); +ERROR: relation "onek_with_null" does not exist +LINE 1: INSERT INTO onek_with_null (unique1,unique2) VALUES (NULL, -... + ^ +CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2,unique1); +ERROR: relation "onek_with_null" does not exist +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = ON; +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; +ERROR: relation "onek_with_null" does not exist +LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; + ^ +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL; +ERROR: relation "onek_with_null" does not exist +LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN... + ^ +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL; +ERROR: relation "onek_with_null" does not exist +LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NUL... + ^ +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL; +ERROR: relation "onek_with_null" does not exist +LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN... 
+ ^
+DROP INDEX onek_nulltest;
+ERROR: index "onek_nulltest" does not exist
+CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 desc,unique1);
+ERROR: relation "onek_with_null" does not exist
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL;
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN...
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NUL...
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN...
+ ^
+DROP INDEX onek_nulltest;
+ERROR: index "onek_nulltest" does not exist
+CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 desc nulls last,unique1);
+ERROR: relation "onek_with_null" does not exist
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL;
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN...
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NUL...
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN...
+ ^
+DROP INDEX onek_nulltest;
+ERROR: index "onek_nulltest" does not exist
+CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 nulls first,unique1);
+ERROR: relation "onek_with_null" does not exist
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL;
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN...
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NUL...
+ ^
+SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL;
+ERROR: relation "onek_with_null" does not exist
+LINE 1: SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AN...
+ ^
+RESET enable_seqscan;
+RESET enable_indexscan;
+RESET enable_bitmapscan;
+
+DROP TABLE onek_with_null;
+ERROR: table "onek_with_null" does not exist
-----------------------------------------------------------------------

Summary of changes:
 .../{create_index.out => create_index_1.out} | 428 +++++++++-----------
 1 files changed, 182 insertions(+), 246 deletions(-)
 copy src/test/regress/expected/{create_index.out => create_index_1.out} (88%)

hooks/post-receive
--
Postgres-XC
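For quick reference, a sketch of the three limitations that make this alternate expected file correct, with statements taken from the test itself and error texts as emitted by Postgres-XC at the time:

    CREATE INDEX CONCURRENTLY concur_index1 ON concur_heap(f2,f1);
    -- ERROR: PGXC does not support concurrent INDEX yet
    CREATE TEMP TABLE gpolygon_tbl AS
      SELECT polygon(home_base) AS f1 FROM slow_emp4000;
    -- ERROR: INTO clause not yet supported
    SELECT unique1, unique2 INTO onek_with_null FROM onek;
    -- ERROR: INTO clause not yet supported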
From: Michael P. <mic...@us...> - 2011-04-08 08:54:42
|
Project "Postgres-XC". The branch, master has been updated via 9a329381083f831459db12cdd6616552c4571c5b (commit) from 7ebac29189aaa205c18e21580ef88f98ac262c43 (commit) - Log ----------------------------------------------------------------- commit 9a329381083f831459db12cdd6616552c4571c5b Author: Michael P <mic...@us...> Date: Fri Apr 8 17:49:52 2011 +0900 Change license of Postgres-XC to BSD License of versions 0.9 to 0.9.3 was LGPL. diff --git a/COPYRIGHT b/COPYRIGHT index b773b4d..18259d0 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -1,6 +1,6 @@ -PostgreSQL Database Management System -(formerly known as Postgres, then as Postgres95) +Postgres-XC Cluster Database Management System +Portions Copyright (c) 2010-2011, Nippon Telegraph and Telephone Corporation Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California @@ -21,3 +21,15 @@ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + +IN NO EVENT SHALL POSTGRESQL GLOBAL DEVELOPMENT GROUP BE LIABLE TO ANY +PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS +SOFTWARE AND ITS DOCUMENTATION, EVEN IF POSTGRESQL GLOBAL DEVELOPMENT +GROUP HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +POSTGRESQL GLOBAL DEVELOPMENT GROUP SPECIFICALLY DISCLAIMS ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS +ON AN "AS IS" BASIS, AND THE POSTGRESQL GLOBAL DEVELOPMENT GROUP HAS NO OBLIGATIONS TO +PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. ----------------------------------------------------------------------- Summary of changes: COPYRIGHT | 16 ++++++++++++++-- 1 files changed, 14 insertions(+), 2 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-04-08 08:34:15
|
Project "Postgres-XC". The branch, master has been updated via 7ebac29189aaa205c18e21580ef88f98ac262c43 (commit) from 0fd3d4109912f1685bbd8ee657df2e92a4ec69f7 (commit) - Log ----------------------------------------------------------------- commit 7ebac29189aaa205c18e21580ef88f98ac262c43 Author: Michael P <mic...@us...> Date: Fri Apr 8 17:28:38 2011 +0900 Fix for regression test constraints Non-immutable functions used as DEFAULT and DEFERRED constraints, are not supported, so this output is OK. A couple of ORDER BY have been added to have constant results whatever the cluster configuration. diff --git a/src/test/regress/input/constraints.source b/src/test/regress/input/constraints.source index 7474d3e..9b3c514 100644 --- a/src/test/regress/input/constraints.source +++ b/src/test/regress/input/constraints.source @@ -282,7 +282,7 @@ ROLLBACK; -- check is done at end of statement, so this should succeed UPDATE unique_tbl SET i = i+1; -SELECT * FROM unique_tbl; +SELECT * FROM unique_tbl ORDER BY 1,t; -- explicitly defer the constraint BEGIN; @@ -294,7 +294,7 @@ DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again COMMIT; -- should succeed -SELECT * FROM unique_tbl; +SELECT * FROM unique_tbl ORDER BY 1,t; -- try adding an initially deferred constraint ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key; @@ -312,7 +312,7 @@ DELETE FROM unique_tbl WHERE i = 5 AND t = 'five'; COMMIT; -SELECT * FROM unique_tbl; +SELECT * FROM unique_tbl ORDER BY 1,t; -- should fail at commit-time BEGIN; @@ -349,7 +349,7 @@ UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three'; COMMIT; -- should fail -SELECT * FROM unique_tbl; +SELECT * FROM unique_tbl ORDER BY 1,t; -- test a HOT update that modifies the newly inserted tuple, -- but should succeed because we then remove the other conflicting tuple. 
@@ -360,11 +360,11 @@ INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree'; DELETE FROM unique_tbl WHERE t = 'three'; -SELECT * FROM unique_tbl; +SELECT * FROM unique_tbl ORDER BY 1,t; COMMIT; -SELECT * FROM unique_tbl; +SELECT * FROM unique_tbl ORDER BY 1,t; DROP TABLE unique_tbl; diff --git a/src/test/regress/output/constraints_1.source b/src/test/regress/output/constraints_1.source new file mode 100644 index 0000000..77e440d --- /dev/null +++ b/src/test/regress/output/constraints_1.source @@ -0,0 +1,646 @@ +-- +-- CONSTRAINTS +-- Constraints can be specified with: +-- - DEFAULT clause +-- - CHECK clauses +-- - PRIMARY KEY clauses +-- - UNIQUE clauses +-- - EXCLUDE clauses +-- +-- +-- DEFAULT syntax +-- +CREATE TABLE DEFAULT_TBL (i int DEFAULT 100, + x text DEFAULT 'vadim', f float8 DEFAULT 123.456); +INSERT INTO DEFAULT_TBL VALUES (1, 'thomas', 57.0613); +INSERT INTO DEFAULT_TBL VALUES (1, 'bruce'); +INSERT INTO DEFAULT_TBL (i, f) VALUES (2, 987.654); +INSERT INTO DEFAULT_TBL (x) VALUES ('marc'); +INSERT INTO DEFAULT_TBL VALUES (3, null, 1.0); +SELECT '' AS five, * FROM DEFAULT_TBL ORDER BY i,x,f; + five | i | x | f +------+-----+--------+--------- + | 1 | bruce | 123.456 + | 1 | thomas | 57.0613 + | 2 | vadim | 987.654 + | 3 | | 1 + | 100 | marc | 123.456 +(5 rows) + +CREATE SEQUENCE DEFAULT_SEQ; +CREATE TABLE DEFAULTEXPR_TBL (i1 int DEFAULT 100 + (200-199) * 2, + i2 int DEFAULT nextval('default_seq')); +ERROR: Postgres-XC does not support DEFAULT with non-immutable functions yet +DETAIL: The feature is not currently supported +INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2); +ERROR: relation "defaultexpr_tbl" does not exist +LINE 1: INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2); + ^ +INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3); +ERROR: relation "defaultexpr_tbl" does not exist +LINE 1: INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3); + ^ +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4); +ERROR: relation "defaultexpr_tbl" does not exist +LINE 1: INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4); + ^ +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL); +ERROR: relation "defaultexpr_tbl" does not exist +LINE 1: INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL); + ^ +SELECT '' AS four, * FROM DEFAULTEXPR_TBL ORDER BY i1,i2; +ERROR: relation "defaultexpr_tbl" does not exist +LINE 1: SELECT '' AS four, * FROM DEFAULTEXPR_TBL ORDER BY i1,i2; + ^ +-- syntax errors +-- test for extraneous comma +CREATE TABLE error_tbl (i int DEFAULT (100, )); +ERROR: syntax error at or near ")" +LINE 1: CREATE TABLE error_tbl (i int DEFAULT (100, )); + ^ +-- this will fail because gram.y uses b_expr not a_expr for defaults, +-- to avoid a shift/reduce conflict that arises from NOT NULL being +-- part of the column definition syntax: +CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); +ERROR: syntax error at or near "IN" +LINE 1: CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); + ^ +-- this should work, however: +CREATE TABLE error_tbl (b1 bool DEFAULT (1 IN (1, 2))); +DROP TABLE error_tbl; +-- +-- CHECK syntax +-- +CREATE TABLE CHECK_TBL (x int, + CONSTRAINT CHECK_CON CHECK (x > 3)); +INSERT INTO CHECK_TBL VALUES (5); +INSERT INTO CHECK_TBL VALUES (4); +INSERT INTO CHECK_TBL VALUES (3); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +INSERT INTO CHECK_TBL VALUES (2); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +INSERT INTO CHECK_TBL VALUES (6); +INSERT INTO CHECK_TBL VALUES (1); +ERROR: new 
row for relation "check_tbl" violates check constraint "check_con" +SELECT '' AS three, * FROM CHECK_TBL ORDER BY x; + three | x +-------+--- + | 4 + | 5 + | 6 +(3 rows) + +CREATE SEQUENCE CHECK_SEQ; +CREATE TABLE CHECK2_TBL (x int, y text, z int, + CONSTRAINT SEQUENCE_CON + CHECK (x > 3 and y <> 'check failed' and z < 8)); +INSERT INTO CHECK2_TBL VALUES (4, 'check ok', -2); +INSERT INTO CHECK2_TBL VALUES (1, 'x check failed', -2); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +INSERT INTO CHECK2_TBL VALUES (5, 'z check failed', 10); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +INSERT INTO CHECK2_TBL VALUES (0, 'check failed', -2); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +INSERT INTO CHECK2_TBL VALUES (6, 'check failed', 11); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +INSERT INTO CHECK2_TBL VALUES (7, 'check ok', 7); +SELECT '' AS two, * from CHECK2_TBL ORDER BY x,y,z; + two | x | y | z +-----+---+----------+---- + | 4 | check ok | -2 + | 7 | check ok | 7 +(2 rows) + +-- +-- Check constraints on INSERT +-- +CREATE SEQUENCE INSERT_SEQ; +CREATE TABLE INSERT_TBL (x INT DEFAULT nextval('insert_seq'), + y TEXT DEFAULT '-NULL-', + z INT DEFAULT -1 * currval('insert_seq'), + CONSTRAINT INSERT_CON CHECK (x >= 3 AND y <> 'check failed' AND x < 8), + CHECK (x + z = 0)); +ERROR: Postgres-XC does not support DEFAULT with non-immutable functions yet +DETAIL: The feature is not currently supported +INSERT INTO INSERT_TBL(x,z) VALUES (2, -2); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(x,z) VALUES (2, -2); + ^ +SELECT '' AS zero, * FROM INSERT_TBL order by x,y,z; +ERROR: relation "insert_tbl" does not exist +LINE 1: SELECT '' AS zero, * FROM INSERT_TBL order by x,y,z; + ^ +SELECT 'one' AS one, nextval('insert_seq'); + one | nextval +-----+--------- + one | 1 +(1 row) + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y) VALUES ('Y'); + ^ +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y) VALUES ('Y'); + ^ +INSERT INTO INSERT_TBL(x,z) VALUES (1, -2); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(x,z) VALUES (1, -2); + ^ +INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7); + ^ +INSERT INTO INSERT_TBL VALUES (5, 'check failed', -5); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL VALUES (5, 'check failed', -5); + ^ +INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7); + ^ +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); + ^ +SELECT '' AS four, * FROM INSERT_TBL order by x,y,z; +ERROR: relation "insert_tbl" does not exist +LINE 1: SELECT '' AS four, * FROM INSERT_TBL order by x,y,z; + ^ +INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4); + ^ +INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed'); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed'); + ^ 
+INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed'); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed'); + ^ +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); + ^ +SELECT '' AS six, * FROM INSERT_TBL order by x,y,z; +ERROR: relation "insert_tbl" does not exist +LINE 1: SELECT '' AS six, * FROM INSERT_TBL order by x,y,z; + ^ +SELECT 'seven' AS one, nextval('insert_seq'); + one | nextval +-------+--------- + seven | 2 +(1 row) + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y) VALUES ('Y'); + ^ +SELECT 'eight' AS one, currval('insert_seq'); + one | currval +-------+--------- + eight | 2 +(1 row) + +-- According to SQL92, it is OK to insert a record that gives rise to NULL +-- constraint-condition results. Postgres used to reject this, but it +-- was wrong: +INSERT INTO INSERT_TBL VALUES (null, null, null); +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL VALUES (null, null, null); + ^ +SELECT '' AS nine, * FROM INSERT_TBL order by x,y,z; +ERROR: relation "insert_tbl" does not exist +LINE 1: SELECT '' AS nine, * FROM INSERT_TBL order by x,y,z; + ^ +-- +-- Check inheritance of defaults and constraints +-- +CREATE TABLE INSERT_CHILD (cx INT default 42, + cy INT CHECK (cy > x)) + INHERITS (INSERT_TBL); +ERROR: relation "insert_tbl" does not exist +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); +ERROR: relation "insert_child" does not exist +LINE 1: INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); + ^ +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); +ERROR: relation "insert_child" does not exist +LINE 1: INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); + ^ +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7); +ERROR: relation "insert_child" does not exist +LINE 1: INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7); + ^ +INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',-6,7); +ERROR: relation "insert_child" does not exist +LINE 1: INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',... + ^ +SELECT * FROM INSERT_CHILD order by 1,2,3; +ERROR: relation "insert_child" does not exist +LINE 1: SELECT * FROM INSERT_CHILD order by 1,2,3; + ^ +DROP TABLE INSERT_CHILD; +ERROR: table "insert_child" does not exist +-- +-- Check constraints on INSERT INTO +-- +DELETE FROM INSERT_TBL; +ERROR: relation "insert_tbl" does not exist +LINE 1: DELETE FROM INSERT_TBL; + ^ +ALTER SEQUENCE INSERT_SEQ RESTART WITH 4; +CREATE TABLE tmp (xd INT, yd TEXT, zd INT); +INSERT INTO tmp VALUES (null, 'Y', null); +INSERT INTO tmp VALUES (5, '!check failed', null); +INSERT INTO tmp VALUES (null, 'try again', null); +INSERT INTO INSERT_TBL(y) select yd from tmp; +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y) select yd from tmp; + ^ +SELECT '' AS three, * FROM INSERT_TBL order by x,y,z; +ERROR: relation "insert_tbl" does not exist +LINE 1: SELECT '' AS three, * FROM INSERT_TBL order by x,y,z; + ^ +INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try again'; +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try aga... + ^ +INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd = 'try again'; +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd ... 
+ ^ +INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd = 'try again'; +ERROR: relation "insert_tbl" does not exist +LINE 1: INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd ... + ^ +SELECT '' AS four, * FROM INSERT_TBL order by x,y,z; +ERROR: relation "insert_tbl" does not exist +LINE 1: SELECT '' AS four, * FROM INSERT_TBL order by x,y,z; + ^ +DROP TABLE tmp; +-- +-- Check constraints on UPDATE +-- +UPDATE INSERT_TBL SET x = NULL WHERE x = 5; +ERROR: relation "insert_tbl" does not exist +LINE 1: UPDATE INSERT_TBL SET x = NULL WHERE x = 5; + ^ +UPDATE INSERT_TBL SET x = 6 WHERE x = 6; +ERROR: relation "insert_tbl" does not exist +LINE 1: UPDATE INSERT_TBL SET x = 6 WHERE x = 6; + ^ +UPDATE INSERT_TBL SET x = -z, z = -x; +ERROR: relation "insert_tbl" does not exist +LINE 1: UPDATE INSERT_TBL SET x = -z, z = -x; + ^ +UPDATE INSERT_TBL SET x = z, z = x; +ERROR: relation "insert_tbl" does not exist +LINE 1: UPDATE INSERT_TBL SET x = z, z = x; + ^ +SELECT * FROM INSERT_TBL order by x,y,z; +ERROR: relation "insert_tbl" does not exist +LINE 1: SELECT * FROM INSERT_TBL order by x,y,z; + ^ +-- DROP TABLE INSERT_TBL; +-- +-- Check constraints on COPY FROM +-- +CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, + CONSTRAINT COPY_CON + CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); +COPY COPY_TBL FROM '@abs_srcdir@/data/constro.data'; +SELECT '' AS two, * FROM COPY_TBL order by x,y,z; + two | x | y | z +-----+---+---------------+--- + | 4 | !check failed | 5 + | 6 | OK | 4 +(2 rows) + +COPY COPY_TBL FROM '@abs_srcdir@/data/constrf.data'; +ERROR: Error while running COPY +SELECT * FROM COPY_TBL order by x,y,z; + x | y | z +---+---------------+--- + 4 | !check failed | 5 + 6 | OK | 4 +(2 rows) + +-- +-- Primary keys +-- +CREATE TABLE PRIMARY_TBL (i int PRIMARY KEY, t text); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "primary_tbl_pkey" for table "primary_tbl" +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +ERROR: duplicate key value violates unique constraint "primary_tbl_pkey" +DETAIL: Key (i)=(1) already exists. 
+INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); +ERROR: null value in column "i" violates not-null constraint +SELECT '' AS four, * FROM PRIMARY_TBL order by i,t; + four | i | t +------+---+------- + | 1 | one + | 2 | two + | 4 | three + | 5 | one +(4 rows) + +DROP TABLE PRIMARY_TBL; +CREATE TABLE PRIMARY_TBL (i int, t text, + PRIMARY KEY(i,t)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "primary_tbl_pkey" for table "primary_tbl" +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); +ERROR: null value in column "i" violates not-null constraint +SELECT '' AS three, * FROM PRIMARY_TBL order by i,t; + three | i | t +-------+---+------- + | 1 | one + | 1 | three + | 2 | two + | 4 | three + | 5 | one +(5 rows) + +DROP TABLE PRIMARY_TBL; +-- +-- Unique keys +-- +CREATE TABLE UNIQUE_TBL (i int UNIQUE, t text); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "unique_tbl_i_key" for table "unique_tbl" +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(1) already exists. +INSERT INTO UNIQUE_TBL VALUES (4, 'four'); +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +INSERT INTO UNIQUE_TBL (t) VALUES ('seven'); +SELECT '' AS five, * FROM UNIQUE_TBL order by i,t; + five | i | t +------+---+------- + | 1 | one + | 2 | two + | 4 | four + | 5 | one + | | seven + | | six +(6 rows) + +DROP TABLE UNIQUE_TBL; +CREATE TABLE UNIQUE_TBL (i int, t text, + UNIQUE(i,t)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "unique_tbl_i_t_key" for table "unique_tbl" +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +ERROR: duplicate key value violates unique constraint "unique_tbl_i_t_key" +DETAIL: Key (i, t)=(1, one) already exists. 
+INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +SELECT '' AS five, * FROM UNIQUE_TBL order by i,t; + five | i | t +------+---+------- + | 1 | one + | 1 | three + | 2 | two + | 5 | one + | | six +(5 rows) + +DROP TABLE UNIQUE_TBL; +-- +-- Deferrable unique constraints +-- +CREATE TABLE unique_tbl (i int UNIQUE DEFERRABLE, t text); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "unique_tbl_i_key" for table "unique_tbl" +INSERT INTO unique_tbl VALUES (0, 'one'); +INSERT INTO unique_tbl VALUES (1, 'two'); +INSERT INTO unique_tbl VALUES (2, 'tree'); +INSERT INTO unique_tbl VALUES (3, 'four'); +INSERT INTO unique_tbl VALUES (4, 'five'); +BEGIN; +-- default is immediate so this should fail right away +UPDATE unique_tbl SET i = 1 WHERE i = 0; +ERROR: Partition column can't be updated in current version +ROLLBACK; +-- check is done at end of statement, so this should succeed +UPDATE unique_tbl SET i = i+1; +ERROR: Partition column can't be updated in current version +SELECT * FROM unique_tbl ORDER BY 1,t; + i | t +---+------ + 0 | one + 1 | two + 2 | tree + 3 | four + 4 | five +(5 rows) + +-- explicitly defer the constraint +BEGIN; +SET CONSTRAINTS unique_tbl_i_key DEFERRED; +ERROR: Postgres-XC does not support DEFERRED constraints yet +DETAIL: The feature is not currently supported +INSERT INTO unique_tbl VALUES (3, 'three'); +ERROR: current transaction is aborted, commands ignored until end of transaction block +DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again +ERROR: current transaction is aborted, commands ignored until end of transaction block +COMMIT; -- should succeed +SELECT * FROM unique_tbl ORDER BY 1,t; + i | t +---+------ + 0 | one + 1 | two + 2 | tree + 3 | four + 4 | five +(5 rows) + +-- try adding an initially deferred constraint +ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key; +ALTER TABLE unique_tbl ADD CONSTRAINT unique_tbl_i_key + UNIQUE (i) DEFERRABLE INITIALLY DEFERRED; +ERROR: Postgres-XC does not support DEFERRED constraints yet +DETAIL: The feature is not currently supported +BEGIN; +INSERT INTO unique_tbl VALUES (1, 'five'); +INSERT INTO unique_tbl VALUES (5, 'one'); +UPDATE unique_tbl SET i = 4 WHERE i = 2; +ERROR: Partition column can't be updated in current version +UPDATE unique_tbl SET i = 2 WHERE i = 4 AND t = 'four'; +ERROR: current transaction is aborted, commands ignored until end of transaction block +DELETE FROM unique_tbl WHERE i = 1 AND t = 'one'; +ERROR: current transaction is aborted, commands ignored until end of transaction block +DELETE FROM unique_tbl WHERE i = 5 AND t = 'five'; +ERROR: current transaction is aborted, commands ignored until end of transaction block +COMMIT; +SELECT * FROM unique_tbl ORDER BY 1,t; + i | t +---+------ + 0 | one + 1 | two + 2 | tree + 3 | four + 4 | five +(5 rows) + +-- should fail at commit-time +BEGIN; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +COMMIT; -- should fail +-- make constraint check immediate +BEGIN; +SET CONSTRAINTS ALL IMMEDIATE; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should fail +COMMIT; +-- forced check when SET CONSTRAINTS is called +BEGIN; +SET CONSTRAINTS ALL DEFERRED; +ERROR: Postgres-XC does not support DEFERRED constraints yet +DETAIL: The feature is not currently supported +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +ERROR: current transaction is aborted, commands ignored until end of transaction block +SET CONSTRAINTS ALL IMMEDIATE; -- should fail +ERROR: 
current transaction is aborted, commands ignored until end of transaction block +COMMIT; +-- test a HOT update that invalidates the conflicting tuple. +-- the trigger should still fire and catch the violation +BEGIN; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three'; +COMMIT; -- should fail +SELECT * FROM unique_tbl ORDER BY 1,t; + i | t +---+------- + 0 | one + 1 | two + 2 | tree + 3 | THREE + 3 | THREE + 3 | THREE + 3 | four + 4 | five +(8 rows) + +-- test a HOT update that modifies the newly inserted tuple, +-- but should succeed because we then remove the other conflicting tuple. +BEGIN; +INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now +UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree'; +DELETE FROM unique_tbl WHERE t = 'three'; +SELECT * FROM unique_tbl ORDER BY 1,t; + i | t +---+-------- + 0 | one + 1 | two + 2 | threex + 3 | THREE + 3 | THREE + 3 | THREE + 3 | four + 3 | threex + 4 | five +(9 rows) + +COMMIT; +SELECT * FROM unique_tbl ORDER BY 1,t; + i | t +---+-------- + 0 | one + 1 | two + 2 | threex + 3 | THREE + 3 | THREE + 3 | THREE + 3 | four + 3 | threex + 4 | five +(9 rows) + +DROP TABLE unique_tbl; +-- +-- EXCLUDE constraints +-- +CREATE TABLE circles ( + c1 CIRCLE, + c2 TEXT, + EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&) + WHERE (circle_center(c1) <> '(0,0)') +); +NOTICE: CREATE TABLE / EXCLUDE will create implicit index "circles_c1_c2_excl" for table "circles" +-- these should succeed because they don't match the index predicate +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>'); +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>'); +-- succeed +INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>'); +-- fail, overlaps +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>'); +-- succeed because c1 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>'); +-- succeed because c2 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>'); +-- should fail on existing data without the WHERE clause +ALTER TABLE circles ADD EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&); +NOTICE: ALTER TABLE / ADD EXCLUDE will create implicit index "circles_c1_c2_excl1" for table "circles" +ERROR: could not create exclusion constraint "circles_c1_c2_excl1" +DETAIL: Key (c1, (c2::circle))=(<(0,0),5>, <(0,0),5>) conflicts with key (c1, (c2::circle))=(<(10,10),10>, <(0,0),5>). 
+DROP TABLE circles; +-- Check deferred exclusion constraint +CREATE TABLE deferred_excl ( + f1 int, + CONSTRAINT deferred_excl_con EXCLUDE (f1 WITH =) INITIALLY DEFERRED +); +ERROR: Postgres-XC does not support DEFERRED constraints yet +DETAIL: The feature is not currently supported +INSERT INTO deferred_excl VALUES(1); +ERROR: relation "deferred_excl" does not exist +LINE 1: INSERT INTO deferred_excl VALUES(1); + ^ +INSERT INTO deferred_excl VALUES(2); +ERROR: relation "deferred_excl" does not exist +LINE 1: INSERT INTO deferred_excl VALUES(2); + ^ +INSERT INTO deferred_excl VALUES(1); -- fail +ERROR: relation "deferred_excl" does not exist +LINE 1: INSERT INTO deferred_excl VALUES(1); + ^ +BEGIN; +INSERT INTO deferred_excl VALUES(2); -- no fail here +ERROR: relation "deferred_excl" does not exist +LINE 1: INSERT INTO deferred_excl VALUES(2); + ^ +COMMIT; -- should fail here +BEGIN; +INSERT INTO deferred_excl VALUES(3); +ERROR: relation "deferred_excl" does not exist +LINE 1: INSERT INTO deferred_excl VALUES(3); + ^ +INSERT INTO deferred_excl VALUES(3); -- no fail here +ERROR: current transaction is aborted, commands ignored until end of transaction block +COMMIT; -- should fail here +ALTER TABLE deferred_excl DROP CONSTRAINT deferred_excl_con; +ERROR: relation "deferred_excl" does not exist +-- This should fail, but worth testing because of HOT updates +UPDATE deferred_excl SET f1 = 3; +ERROR: relation "deferred_excl" does not exist +LINE 1: UPDATE deferred_excl SET f1 = 3; + ^ +ALTER TABLE deferred_excl ADD EXCLUDE (f1 WITH =); +ERROR: relation "deferred_excl" does not exist +DROP TABLE deferred_excl; +ERROR: table "deferred_excl" does not exist ----------------------------------------------------------------------- Summary of changes: src/test/regress/input/constraints.source | 12 +- .../{constraints.source => constraints_1.source} | 374 ++++++++++++-------- 2 files changed, 228 insertions(+), 158 deletions(-) copy src/test/regress/output/{constraints.source => constraints_1.source} (63%) hooks/post-receive -- Postgres-XC |
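A note on why the ORDER BY clauses added above are needed at all: on a distributed table the Coordinator returns rows in whatever order the Datanodes deliver them, so an unordered SELECT is not reproducible across cluster configurations. A sketch of the pattern, using an invented table name:

CREATE TABLE dist_tbl (i int, t text);
INSERT INTO dist_tbl VALUES (0, 'one'), (1, 'two'), (2, 'tree');

-- Row order may change with the number of Datanodes and their timing:
SELECT * FROM dist_tbl;

-- Deterministic whatever the configuration, hence safe as expected output:
SELECT * FROM dist_tbl ORDER BY 1, t;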
From: Koichi S. <koi...@gm...> - 2011-04-08 08:26:45
|
Yeah. I believe the 1st cut is very important to make the solution general. We used to take the second cut first, and this influenced the range of statements we could cover. (It was very useful to run DBT-1 though). Regards; ---------- Koichi Suzuki 2011/4/8 Ashutosh Bapat <ash...@en...>: > Before I forget, Koichi, your sourceforge address bounced, so my mail only > reached Mason. Thanks Mason for including others in the thread again. > > On Fri, Apr 8, 2011 at 6:04 AM, Koichi Suzuki <koi...@gm...> wrote: >> >> Thank you for valuable advice. We also should think how GROUP BY can >> be pushed down to datanodes so that coordinator can simply merge the >> result. > > If I understand it right, Mason has already given solution to that problem. > We push the groupby down to datanodes with additional order by clause > (ordering based on the group by expressions) on top of them (this looks > tricky if there are already other order by clauses). Thus the data we > collect at coordinator is already grouped per datanode and all coordinator > has to do is to consolidate each row from the data nodes in order like we do > in merge sort. > > I see that we may have to do this in steps. > 1. first cut - implement to apply group by only at coordinator. Not so > efficient, but will make group by work > 2. second cut - implement pushing down group by to the data nodes, better > than one but still the grouping at coordinator is not that efficient > 3. third cut - implement above idea fully > > We might be able to do 1 and 2 in the first cut itself. But this is too > early to say anything. I will get back once I know things better. > > Mason has also pointed to the possibility of distributing grouping phase at > coordinator across datanodes (in third cut) so that coordinator is not > loaded if there are too many columns in the group by. But that requires the > infrastructure to ship rows from coordinator to datanodes. This > infrastructure is not in place, I think. So that is a far possibility for now. > > >> >> ---------- >> Koichi Suzuki >> >> >> >> 2011/4/7 Mason <ma...@us...>: >> > I looked at the schedule. >> > >> > I am not sure about the planned design for GROUP BY, but originally >> > Andrei was planning on making it somewhat similar to ORDER BY, how >> > ORDER BY does a merge sort on the coordinator, based on sorted results >> > from the data nodes. Each data node could do the beginning phase of >> > aggregation in groups and then sort the output in the same manner of >> > the groups. Then, the coordinator could do the last step of >> > aggregation with like groups, which is easy to get them on the fly >> > because of the sorting coming in from the data nodes (and avoiding >> > materialization). >> > >> > This should work pretty well. One drawback is if they chose a GROUP >> > BY clause with many groups (many = thousands+). Then some parallelism >> > is lost because of the final phase being done in only one place, on >> > the Coordinator. GridSQL spreads out the final aggregation phase >> > amongst all the data nodes, moving like groups to the same node to get >> > more parallelism. I think row shipping infrastructure might have to be >> > in place first before implementing that, and there will be a >> > noticeable benefit only once there are many, many groups, so I don't >> > see it being a critical thing at this phase and can be added later. >> > >> > Regards, >> > >> > Mason >> > >> > >> > >> > On Thu, Apr 7, 2011 at 5:27 AM, Koichi Suzuki >> > <koi...@us...> wrote: >> >> Project "Postgres-XC documentation".
>> >> >> >> The branch, master has been updated >> >> via 62434399fdd57aff2701e3e5e97fed619f6d6820 (commit) >> >> from 252519c2be5309a3682b0ee895cf040083ae1784 (commit) >> >> >> >> >> >> - Log ----------------------------------------------------------------- >> >> commit 62434399fdd57aff2701e3e5e97fed619f6d6820 >> >> Author: Koichi Suzuki <koi...@gm...> >> >> Date: Thu Apr 7 18:27:26 2011 +0900 >> >> >> >> 1. Added 2011FYQ1 schedule for each member. >> >> 2. Modified my progress sheet of Reference Manual. >> >> >> >> -- Koichi Suzuki >> >> >> >> diff --git a/progress/2011FYQ1_Schedule.ods >> >> b/progress/2011FYQ1_Schedule.ods >> >> new file mode 100755 >> >> index 0000000..5e24d37 >> >> Binary files /dev/null and b/progress/2011FYQ1_Schedule.ods differ >> >> diff --git a/progress/documentation-progress.ods >> >> b/progress/documentation-progress.ods >> >> index 277aade..2c8577e 100644 >> >> Binary files a/progress/documentation-progress.ods and >> >> b/progress/documentation-progress.ods differ >> >> >> >> ----------------------------------------------------------------------- >> >> >> >> Summary of changes: >> >> progress/2011FYQ1_Schedule.ods | Bin 0 -> 22147 bytes >> >> progress/documentation-progress.ods | Bin 16883 -> 19519 bytes >> >> 2 files changed, 0 insertions(+), 0 deletions(-) >> >> create mode 100755 progress/2011FYQ1_Schedule.ods >> >> >> >> >> >> hooks/post-receive >> >> -- >> >> Postgres-XC documentation >> >> >> >> >> >> ------------------------------------------------------------------------------ >> >> Xperia(TM) PLAY >> >> It's a major breakthrough. An authentic gaming >> >> smartphone on the nation's most reliable network. >> >> And it wants your games. >> >> http://p.sf.net/sfu/verizon-sfdev >> >> _______________________________________________ >> >> Postgres-xc-committers mailing list >> >> Pos...@li... >> >> https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers >> >> >> > >> > >> > ------------------------------------------------------------------------------ >> > Xperia(TM) PLAY >> > It's a major breakthrough. An authentic gaming >> > smartphone on the nation's most reliable network. >> > And it wants your games. >> > http://p.sf.net/sfu/verizon-sfdev >> > _______________________________________________ >> > Postgres-xc-committers mailing list >> > Pos...@li... >> > https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers >> > > > > > -- > Best Wishes, > Ashutosh Bapat > EntepriseDB Corporation > The Enterprise Postgres Company > > |
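To make the rewrite discussed in this thread concrete, here is a sketch of the second cut for a simple aggregate; the table and column names are hypothetical, and Postgres-XC does not necessarily generate this exact SQL:

-- Query as received by the Coordinator:
SELECT dept, count(*) FROM emp GROUP BY dept;

-- What each Datanode would be asked to run: grouped locally, then
-- sorted on the grouping key so the Coordinator can merge the streams:
SELECT dept, count(*) FROM emp GROUP BY dept ORDER BY dept;

-- The Coordinator walks the pre-sorted per-node streams as in a merge
-- sort, combining rows with equal dept values; for count(*) the final
-- step amounts to summing the per-node counts of each group.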
From: Michael P. <mic...@us...> - 2011-04-08 08:05:42
|
Project "Postgres-XC". The branch, master has been updated via 0fd3d4109912f1685bbd8ee657df2e92a4ec69f7 (commit) from 219e41d7324c86ceb537df13214774b70051a833 (commit) - Log ----------------------------------------------------------------- commit 0fd3d4109912f1685bbd8ee657df2e92a4ec69f7 Author: Michael P <mic...@us...> Date: Fri Apr 8 16:53:19 2011 +0900 Block use of non-immutable functions as DEFAULT values DEFAULT is used by CREATE/ALTER TABLE/DOMAIN. A DML query using a default with non-immutable function needs to be fed by Coordinator. This is particularly necessary in the case of replicated tables. But even in the case of distributed tables timestamp values have to be taken from GTM to avoid data inconsistencies through the cluster. As a possible implementation, a constraint with non-immutable functions is just created on Coordinator and not on Datanodes. When a DML query needs a default value Coordinator feeds it, rewrites the query, and distributes it to Datanodes so as the uniqueness of the value is insured. Sequence (currval, nextval) and timestamp values (now()...) have to be taken from GTM. diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 64eab92..8b25ace 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -2399,6 +2399,28 @@ cookDefault(ParseState *pstate, Assert(raw_default != NULL); +#ifdef PGXC + /* + * Block use of non-immutable functions as DEFAULT. + * + * Support of nextval(), currval(), now() as DEFAULT value in XC needs a special support + * like SERIAL, so block it for the time being + * + * PGXCTODO: As possible implementation, a constraint with non-immutable functions + * is just created on Coordinator and when an INSERT query needs a default value + * Coordinator feeds it, rewrite the query, and distributes it to Datanodes + * + * Sequence (currval, nextval) and timestamp values (now()...) have + * to be taken from GTM. + */ + if (IsA(raw_default,FuncCall)) + if (!IsFuncImmutable(pstate, (FuncCall *) raw_default)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Postgres-XC does not support DEFAULT with non-immutable functions yet"), + errdetail("The feature is not currently supported"))); +#endif + /* * Transform raw parsetree to executable expression. 
*/ diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index addd0d4..2159a68 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -1232,6 +1232,30 @@ transformFuncCall(ParseState *pstate, FuncCall *fn) fn->location); } +#ifdef PGXC +/* + * IsFuncImmutable + * + * Check if a given function is immutable or not + * based on the function name and on its arguments + */ +bool +IsFuncImmutable(ParseState *pstate, FuncCall *fn) +{ + ListCell *args; + List *targs = NIL; + + /* Transform list of arguments */ + foreach(args, fn->args) + { + targs = lappend(targs, transformExpr(pstate, + (Node *) lfirst(args))); + } + + return IsParseFuncImmutable(pstate, targs, fn->funcname, fn->func_variadic); +} +#endif + static Node * transformCaseExpr(ParseState *pstate, CaseExpr *c) { diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c index 2cbc9f5..da8abee 100644 --- a/src/backend/parser/parse_func.c +++ b/src/backend/parser/parse_func.c @@ -1654,6 +1654,101 @@ check_pg_get_expr_args(ParseState *pstate, Oid fnoid, List *args) errmsg("argument to pg_get_expr() must come from system catalogs"))); } +#ifdef PGXC +/* + * IsParseFuncImmutable + * + * Check if a given function is immutable or not + * based on the function name and on its arguments + * This functionality will be extended to support functions in constraints + */ +bool +IsParseFuncImmutable(ParseState *pstate, List *targs, List *funcname, bool func_variadic) +{ + ListCell *l; + ListCell *nextl; + FuncDetailCode fdresult; + Oid actual_arg_types[FUNC_MAX_ARGS]; + List *argnames; + int nargs; + /* Return results */ + Oid funcid, rettype; + Oid *declared_arg_types; + bool retset; + int nvargs; + List *argdefaults; + + /* Get detailed argument information */ + nargs = 0; + for (l = list_head(targs); l != NULL; l = nextl) + { + Node *arg = lfirst(l); + Oid argtype = exprType(arg); + + nextl = lnext(l); + + if (argtype == VOIDOID && IsA(arg, Param)) + { + targs = list_delete_ptr(targs, arg); + continue; + } + actual_arg_types[nargs++] = argtype; + } + argnames = NIL; + + foreach(l, targs) + { + Node *arg = lfirst(l); + + if (IsA(arg, NamedArgExpr)) + { + NamedArgExpr *na = (NamedArgExpr *) arg; + ListCell *lc; + + /* Reject duplicate arg names */ + foreach(lc, argnames) + { + if (strcmp(na->name, (char *) lfirst(lc)) == 0) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("argument name \"%s\" used more than once", + na->name), + parser_errposition(pstate, na->location))); + } + argnames = lappend(argnames, na->name); + } + else + { + if (argnames != NIL) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("positional argument cannot follow named argument"), + parser_errposition(pstate, exprLocation(arg)))); + } + } + + fdresult = func_get_detail(funcname, + targs, + argnames, + nargs, + actual_arg_types, + !func_variadic, + true, + &funcid, &rettype, &retset, &nvargs, + &declared_arg_types, &argdefaults); + + /* + * For now, only the function ID is used to check if the function is immutable or not, + * but for function support in DEFAULT values, this function can be easily extended + * for other analysis purposes.
+ */ + if (func_volatile(funcid) == PROVOLATILE_IMMUTABLE) + return true; + else + return false; +} +#endif + static bool check_pg_get_expr_arg(ParseState *pstate, Node *arg, int netlevelsup) { diff --git a/src/include/parser/parse_expr.h b/src/include/parser/parse_expr.h index 654bf34..dce33db 100644 --- a/src/include/parser/parse_expr.h +++ b/src/include/parser/parse_expr.h @@ -19,5 +19,8 @@ extern bool Transform_null_equals; extern Node *transformExpr(ParseState *pstate, Node *expr); +#ifdef PGXC +extern bool IsFuncImmutable(ParseState *pstate, FuncCall *fn); +#endif #endif /* PARSE_EXPR_H */ diff --git a/src/include/parser/parse_func.h b/src/include/parser/parse_func.h index 05f07c7..544179a 100644 --- a/src/include/parser/parse_func.h +++ b/src/include/parser/parse_func.h @@ -83,5 +83,8 @@ extern Oid LookupAggNameTypeNames(List *aggname, List *argtypes, bool noError); extern void check_pg_get_expr_args(ParseState *pstate, Oid fnoid, List *args); +#ifdef PGXC +extern bool IsParseFuncImmutable(ParseState *pstate, List *fn_args, List *funcname, bool func_variadic); +#endif #endif /* PARSE_FUNC_H */ diff --git a/src/test/regress/expected/dependency_1.out b/src/test/regress/expected/dependency_1.out index ecf687d..827f442 100644 --- a/src/test/regress/expected/dependency_1.out +++ b/src/test/regress/expected/dependency_1.out @@ -102,7 +102,8 @@ CREATE TABLE deptest2 (f1 int); -- make a serial column the hard way CREATE SEQUENCE ss1; ALTER TABLE deptest2 ALTER f1 SET DEFAULT nextval('ss1'); -ERROR: relation "ss1" does not exist +ERROR: Postgres-XC does not support DEFAULT with non-immutable functions yet +DETAIL: The feature is not currently supported ALTER SEQUENCE ss1 OWNED BY deptest2.f1; RESET SESSION AUTHORIZATION; REASSIGN OWNED BY regression_user1 TO regression_user2; diff --git a/src/test/regress/expected/uuid_1.out b/src/test/regress/expected/uuid_1.out index 6a593be..39ea1e9 100644 --- a/src/test/regress/expected/uuid_1.out +++ b/src/test/regress/expected/uuid_1.out @@ -5,144 +5,147 @@ CREATE TABLE guid1 guid_field UUID, text_field TEXT DEFAULT(now()) ); +ERROR: Postgres-XC does not support DEFAULT with non-immutable functions yet +DETAIL: The feature is not currently supported CREATE TABLE guid2 ( guid_field UUID, text_field TEXT DEFAULT(now()) ); +ERROR: Postgres-XC does not support DEFAULT with non-immutable functions yet +DETAIL: The feature is not currently supported -- inserting invalid data tests -- too long INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111F'); -ERROR: invalid input syntax for uuid: "11111111-1111-1111-1111-111111111111F" +ERROR: relation "guid1" does not exist LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-111... - ^ + ^ -- too short INSERT INTO guid1(guid_field) VALUES('{11111111-1111-1111-1111-11111111111}'); -ERROR: invalid input syntax for uuid: "{11111111-1111-1111-1111-11111111111}" +ERROR: relation "guid1" does not exist LINE 1: INSERT INTO guid1(guid_field) VALUES('{11111111-1111-1111-11... - ^ + ^ -- valid data but invalid format INSERT INTO guid1(guid_field) VALUES('111-11111-1111-1111-1111-111111111111'); -ERROR: invalid input syntax for uuid: "111-11111-1111-1111-1111-111111111111" +ERROR: relation "guid1" does not exist LINE 1: INSERT INTO guid1(guid_field) VALUES('111-11111-1111-1111-11... 
- ^ + ^ INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222 '); -ERROR: invalid input syntax for uuid: "{22222222-2222-2222-2222-222222222222 " +ERROR: relation "guid1" does not exist LINE 1: INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-22... - ^ + ^ -- invalid data INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-G111-111111111111'); -ERROR: invalid input syntax for uuid: "11111111-1111-1111-G111-111111111111" +ERROR: relation "guid1" does not exist LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-G11... - ^ + ^ INSERT INTO guid1(guid_field) VALUES('11+11111-1111-1111-1111-111111111111'); -ERROR: invalid input syntax for uuid: "11+11111-1111-1111-1111-111111111111" +ERROR: relation "guid1" does not exist LINE 1: INSERT INTO guid1(guid_field) VALUES('11+11111-1111-1111-111... - ^ + ^ --inserting three input formats INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111'); +ERROR: relation "guid1" does not exist +LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-111... + ^ INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}'); +ERROR: relation "guid1" does not exist +LINE 1: INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-22... + ^ INSERT INTO guid1(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e'); +ERROR: relation "guid1" does not exist +LINE 1: INSERT INTO guid1(guid_field) VALUES('3f3e3c3b3a303938373635... + ^ -- retrieving the inserted data SELECT guid_field FROM guid1 ORDER BY guid_field; - guid_field --------------------------------------- - 11111111-1111-1111-1111-111111111111 - 22222222-2222-2222-2222-222222222222 - 3f3e3c3b-3a30-3938-3736-353433a2313e -(3 rows) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT guid_field FROM guid1 ORDER BY guid_field; + ^ -- ordering test SELECT guid_field FROM guid1 ORDER BY guid_field ASC; - guid_field --------------------------------------- - 11111111-1111-1111-1111-111111111111 - 22222222-2222-2222-2222-222222222222 - 3f3e3c3b-3a30-3938-3736-353433a2313e -(3 rows) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT guid_field FROM guid1 ORDER BY guid_field ASC; + ^ SELECT guid_field FROM guid1 ORDER BY guid_field DESC; - guid_field --------------------------------------- - 3f3e3c3b-3a30-3938-3736-353433a2313e - 22222222-2222-2222-2222-222222222222 - 11111111-1111-1111-1111-111111111111 -(3 rows) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT guid_field FROM guid1 ORDER BY guid_field DESC; + ^ -- = operator test SELECT COUNT(*) FROM guid1 WHERE guid_field = '3f3e3c3b-3a30-3938-3736-353433a2313e'; - count -------- - 1 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 WHERE guid_field = '3f3e3c3b-3a30... + ^ -- <> operator test SELECT COUNT(*) FROM guid1 WHERE guid_field <> '11111111111111111111111111111111'; - count -------- - 2 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 WHERE guid_field <> '111111111111... + ^ -- < operator test SELECT COUNT(*) FROM guid1 WHERE guid_field < '22222222-2222-2222-2222-222222222222'; - count -------- - 1 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 WHERE guid_field < '22222222-2222... + ^ -- <= operator test SELECT COUNT(*) FROM guid1 WHERE guid_field <= '22222222-2222-2222-2222-222222222222'; - count -------- - 2 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 WHERE guid_field <= '22222222-222... 
+ ^ -- > operator test SELECT COUNT(*) FROM guid1 WHERE guid_field > '22222222-2222-2222-2222-222222222222'; - count -------- - 1 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 WHERE guid_field > '22222222-2222... + ^ -- >= operator test SELECT COUNT(*) FROM guid1 WHERE guid_field >= '22222222-2222-2222-2222-222222222222'; - count -------- - 2 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 WHERE guid_field >= '22222222-222... + ^ -- btree and hash index creation test CREATE INDEX guid1_btree ON guid1 USING BTREE (guid_field); +ERROR: relation "guid1" does not exist CREATE INDEX guid1_hash ON guid1 USING HASH (guid_field); +ERROR: relation "guid1" does not exist -- unique index test CREATE UNIQUE INDEX guid1_unique_BTREE ON guid1 USING BTREE (guid_field); -ERROR: Cannot locally enforce a unique index on round robin distributed table. +ERROR: relation "guid1" does not exist -- should fail INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111'); +ERROR: relation "guid1" does not exist +LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-111... + ^ -- check to see whether the new indexes are actually there SELECT count(*) FROM pg_class WHERE relkind='i' AND relname LIKE 'guid%'; count ------- - 2 + 0 (1 row) -- populating the test tables with additional records INSERT INTO guid1(guid_field) VALUES('44444444-4444-4444-4444-444444444444'); +ERROR: relation "guid1" does not exist +LINE 1: INSERT INTO guid1(guid_field) VALUES('44444444-4444-4444-444... + ^ INSERT INTO guid2(guid_field) VALUES('11111111-1111-1111-1111-111111111111'); +ERROR: relation "guid2" does not exist +LINE 1: INSERT INTO guid2(guid_field) VALUES('11111111-1111-1111-111... + ^ INSERT INTO guid2(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}'); +ERROR: relation "guid2" does not exist +LINE 1: INSERT INTO guid2(guid_field) VALUES('{22222222-2222-2222-22... + ^ INSERT INTO guid2(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e'); +ERROR: relation "guid2" does not exist +LINE 1: INSERT INTO guid2(guid_field) VALUES('3f3e3c3b3a303938373635... + ^ -- join test SELECT COUNT(*) FROM guid1 g1 INNER JOIN guid2 g2 ON g1.guid_field = g2.guid_field; - count -------- - 4 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 g1 INNER JOIN guid2 g2 ON g1.guid... + ^ SELECT COUNT(*) FROM guid1 g1 LEFT JOIN guid2 g2 ON g1.guid_field = g2.guid_field WHERE g2.guid_field IS NULL; - count -------- - 1 -(1 row) - +ERROR: relation "guid1" does not exist +LINE 1: SELECT COUNT(*) FROM guid1 g1 LEFT JOIN guid2 g2 ON g1.guid_... + ^ -- clean up DROP TABLE guid1, guid2 CASCADE; +ERROR: table "guid1" does not exist ----------------------------------------------------------------------- Summary of changes: src/backend/catalog/heap.c | 22 ++++ src/backend/parser/parse_expr.c | 24 +++++ src/backend/parser/parse_func.c | 95 +++++++++++++++++ src/include/parser/parse_expr.h | 3 + src/include/parser/parse_func.h | 3 + src/test/regress/expected/dependency_1.out | 3 +- src/test/regress/expected/uuid_1.out | 153 ++++++++++++++-------------- 7 files changed, 227 insertions(+), 76 deletions(-) hooks/post-receive -- Postgres-XC |
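The net effect of this patch is easiest to see from a pair of table definitions; a minimal sketch with invented table names, matching the errors recorded in the regression output above:

-- Immutable default expressions are still accepted:
CREATE TABLE ok_tbl (i int DEFAULT 100 + (200 - 199) * 2);

-- Defaults built on volatile functions such as now(), nextval() or
-- currval() are now rejected when the default expression is cooked:
CREATE TABLE ko_tbl (t text DEFAULT now());
-- ERROR:  Postgres-XC does not support DEFAULT with non-immutable functions yet
-- DETAIL:  The feature is not currently supported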
From: Koichi S. <koi...@us...> - 2011-04-08 07:28:40
|
Project "Postgres-XC". The branch, ha_support has been updated via 2974169569091d02a67feaf2b5dcc93b575965e5 (commit) from 5dddee958a4b124741d1e76c4187a04f73852f94 (commit) - Log ----------------------------------------------------------------- commit 2974169569091d02a67feaf2b5dcc93b575965e5 Author: Koichi Suzuki <koi...@gm...> Date: Fri Apr 8 16:28:31 2011 +0900 gtm_ctl.c is modified so that it can accept reconnect command only for gtm_proxy. diff --git a/src/gtm/gtm_ctl/gtm_ctl.c b/src/gtm/gtm_ctl/gtm_ctl.c index 1fdce3b..5092a47 100644 --- a/src/gtm/gtm_ctl/gtm_ctl.c +++ b/src/gtm/gtm_ctl/gtm_ctl.c @@ -593,6 +593,14 @@ do_reconnect(void) char *reconnect_point_file_nam; FILE *reconnect_point_file; + /* + * Target must beo "gtm_proxy" + */ + if (strcmp(gtm_app, "gtm_proxy") != 0) + { + write_stderr(_("%s: only gtm_proxy can accept reconnect command\n"), progname); + exit(1); + } pid = get_pgpid(); if (pid == 0) /* no pid file */ ----------------------------------------------------------------------- Summary of changes: src/gtm/gtm_ctl/gtm_ctl.c | 8 ++++++++ 1 files changed, 8 insertions(+), 0 deletions(-) hooks/post-receive -- Postgres-XC |
From: Andrei M. <and...@gm...> - 2011-04-08 07:28:21
|
Hi, Actually the ORDER BY does not always have to be pushed down to the data nodes. Postgres may decide to group by hash, and in this case sorting is an unnecessary operation. However, it may be a problem to determine on the coordinator whether a data node is going to use sort or hash grouping. Aggregate functions are already changed so that values are pre-aggregated on datanodes, and the coordinator completes the aggregation. This should not be a problem. 2011/4/8 Ashutosh Bapat <ash...@en...> > Before I forget, Koichi, your sourceforge address bounced, so my mail only > reached Mason. Thanks Mason for including others in the thread again. > > On Fri, Apr 8, 2011 at 6:04 AM, Koichi Suzuki <koi...@gm...> wrote: > >> Thank you for valuable advice. We also should think how GROUP BY can >> be pushed down to datanodes so that coordinator can simply merge the >> result. >> > > If I understand it right, Mason has already given solution to that problem. > We push the groupby down to datanodes with additional order by clause > (ordering based on the group by expressions) on top of them (this looks > tricky if there are already other order by clauses). Thus the data we > collect at coordinator is already grouped per datanode and all coordinator > has to do is to consolidate each row from the data nodes in order like we do > in merge sort. > > I see that we may have to do this in steps. > 1. first cut - implement to apply group by only at coordinator. Not so > efficient, but will make group by work > 2. second cut - implement pushing down group by to the data nodes, better > than one but still the grouping at coordinator is not that efficient > 3. third cut - implement above idea fully > > We might be able to do 1 and 2 in the first cut itself. But this is too > early to say anything. I will get back once I know things better. > > Mason has also pointed to the possibility of distributing grouping phase at > coordinator across datanodes (in third cut) so that coordinator is not > loaded if there are too many columns in the group by. But that requires the > infrastructure to ship rows from coordinator to datanodes. This > infrastructure is not in place, I think. So that is a far possibility for now. > > > >> >> ---------- >> Koichi Suzuki >> >> >> >> 2011/4/7 Mason <ma...@us...>: >> > I looked at the schedule. >> > >> > I am not sure about the planned design for GROUP BY, but originally >> > Andrei was planning on making it somewhat similar to ORDER BY, how >> > ORDER BY does a merge sort on the coordinator, based on sorted results >> > from the data nodes. Each data node could do the beginning phase of >> > aggregation in groups and then sort the output in the same manner of >> > the groups. Then, the coordinator could do the last step of >> > aggregation with like groups, which is easy to get them on the fly >> > because of the sorting coming in from the data nodes (and avoiding >> > materialization). >> > >> > This should work pretty well. One drawback is if they chose a GROUP >> > BY clause with many groups (many = thousands+). Then some parallelism >> > is lost because of the final phase being done in only one place, on >> > the Coordinator. GridSQL spreads out the final aggregation phase >> > amongst all the data nodes, moving like groups to the same node to get >> > more parallelism.
I think row shipping infrastructure might have to be >> > in place first before implementing that, and there will be a >> > noticeable benefit only once there are many, many groups, so I don't >> > see it being a critical thing at this phase and can be added later. >> > >> > Regards, >> > >> > Mason >> > >> > >> > >> > On Thu, Apr 7, 2011 at 5:27 AM, Koichi Suzuki >> > <koi...@us...> wrote: >> >> Project "Postgres-XC documentation". >> >> >> >> The branch, master has been updated >> >> via 62434399fdd57aff2701e3e5e97fed619f6d6820 (commit) >> >> from 252519c2be5309a3682b0ee895cf040083ae1784 (commit) >> >> >> >> >> >> - Log ----------------------------------------------------------------- >> >> commit 62434399fdd57aff2701e3e5e97fed619f6d6820 >> >> Author: Koichi Suzuki <koi...@gm...> >> >> Date: Thu Apr 7 18:27:26 2011 +0900 >> >> >> >> 1. Added 2011FYQ1 schedule for each member. >> >> 2. Modified my progress sheet of Reference Manual. >> >> >> >> -- Koichi Suzuki >> >> >> >> diff --git a/progress/2011FYQ1_Schedule.ods >> b/progress/2011FYQ1_Schedule.ods >> >> new file mode 100755 >> >> index 0000000..5e24d37 >> >> Binary files /dev/null and b/progress/2011FYQ1_Schedule.ods differ >> >> diff --git a/progress/documentation-progress.ods >> b/progress/documentation-progress.ods >> >> index 277aade..2c8577e 100644 >> >> Binary files a/progress/documentation-progress.ods and >> b/progress/documentation-progress.ods differ >> >> >> >> ----------------------------------------------------------------------- >> >> >> >> Summary of changes: >> >> progress/2011FYQ1_Schedule.ods | Bin 0 -> 22147 bytes >> >> progress/documentation-progress.ods | Bin 16883 -> 19519 bytes >> >> 2 files changed, 0 insertions(+), 0 deletions(-) >> >> create mode 100755 progress/2011FYQ1_Schedule.ods >> >> >> >> >> >> hooks/post-receive >> >> -- >> >> Postgres-XC documentation >> >> >> >> >> ------------------------------------------------------------------------------ >> >> Xperia(TM) PLAY >> >> It's a major breakthrough. An authentic gaming >> >> smartphone on the nation's most reliable network. >> >> And it wants your games. >> >> http://p.sf.net/sfu/verizon-sfdev >> >> _______________________________________________ >> >> Postgres-xc-committers mailing list >> >> Pos...@li... >> >> https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers >> >> >> > >> > >> ------------------------------------------------------------------------------ >> > Xperia(TM) PLAY >> > It's a major breakthrough. An authentic gaming >> > smartphone on the nation's most reliable network. >> > And it wants your games. >> > http://p.sf.net/sfu/verizon-sfdev >> > _______________________________________________ >> > Postgres-xc-committers mailing list >> > Pos...@li... >> > https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers >> > >> > > > > -- > Best Wishes, > Ashutosh Bapat > EntepriseDB Corporation > The Enterprise Postgres Company > > > > ------------------------------------------------------------------------------ > Xperia(TM) PLAY > It's a major breakthrough. An authentic gaming > smartphone on the nation's most reliable network. > And it wants your games. > http://p.sf.net/sfu/verizon-sfdev > _______________________________________________ > Postgres-xc-developers mailing list > Pos...@li... 
> https://lists.sourceforge.net/lists/listinfo/postgres-xc-developers > > -- Best regards, Andrei Martsinchyk mailto:and...@gm... |
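To make the push-down strategy discussed in this thread concrete, here is a minimal SQL sketch. The table, its distribution clause, and the rewritten per-datanode query are illustrative assumptions, not actual Postgres-XC planner output; note the table is deliberately distributed on a column other than the grouping column, so groups span datanodes and the coordinator merge is actually needed:

    -- hypothetical table, hash-distributed on sale_id, not on region
    CREATE TABLE sales (sale_id int, region text, amount numeric)
        DISTRIBUTE BY HASH (sale_id);

    -- query received by the coordinator
    SELECT region, sum(amount) FROM sales GROUP BY region;

    -- what each datanode could be asked to run (second cut onwards): the same
    -- grouping plus an ORDER BY on the grouping expressions, so every stream
    -- arrives at the coordinator pre-aggregated and sorted
    SELECT region, sum(amount) FROM sales GROUP BY region ORDER BY region;

    -- the coordinator then walks the sorted streams as in a merge sort and,
    -- whenever consecutive rows carry the same region, combines their partial sums

The added ORDER BY exists only to serve the coordinator's merge; if the user's query carries its own ORDER BY, the two orderings have to be reconciled, which is the tricky case mentioned above.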
From: Koichi S. <koi...@us...> - 2011-04-08 07:15:38
|
Project "Postgres-XC". The branch, ha_support has been updated via 5dddee958a4b124741d1e76c4187a04f73852f94 (commit) from 0794d85a15b5124f666190926ae036dec9f39855 (commit) - Log ----------------------------------------------------------------- commit 5dddee958a4b124741d1e76c4187a04f73852f94 Author: Koichi Suzuki <koi...@gm...> Date: Fri Apr 8 16:11:14 2011 +0900 This is the first commit ot GTM reconnect to new promoted GTM. Reconnect can be done by gtm_ctl as follows: gtm_ctl -D dir -S gtm_proxy reconnect -o "-s newhost -t newport" Reconnect is notified to gtm_proxy using SIGUSR2 signal. Option parameter will be passed through the file "newgtm" placed at the directory specified by -D option. After this, we should write gtm_proxy handler. diff --git a/src/gtm/gtm_ctl/gtm_ctl.c b/src/gtm/gtm_ctl/gtm_ctl.c index 91690c9..1fdce3b 100644 --- a/src/gtm/gtm_ctl/gtm_ctl.c +++ b/src/gtm/gtm_ctl/gtm_ctl.c @@ -45,6 +45,7 @@ typedef enum PROMOTE_COMMAND, RESTART_COMMAND, STATUS_COMMAND, + RECONNECT_COMMAND, /* gtm_ctl -S gtm_proxy reconnect */ } CtlCommand; #define DEFAULT_WAIT 60 @@ -64,6 +65,8 @@ static char *log_file = NULL; static char *gtm_path = NULL; static char *gtm_app = NULL; static char *argv0 = NULL; +static char *reconnect_host = NULL; +static char *reconnect_port = NULL; static void write_stderr(const char *fmt,...) @@ -78,6 +81,7 @@ static void set_mode(char *modeopt); static void do_start(void); static void do_stop(void); static void do_restart(void); +static void do_reconnect(void); static void print_msg(const char *msg); static pgpid_t get_pgpid(void); @@ -567,6 +571,77 @@ do_promote(void) } } +/* + * At least we expect the following argument + * + * 1) -D datadir + * 2) -o options: we expect that -t and -s options are specified here. + * Check will be done in GTM-Proxy. If there's an error, it will be + * logged. In this case, GTM-Proxy won't terminate. It will continue + * to read/write with old GTM. + * + * Because they are not passed to gtm directly, they should appear in + * gtm_ctl argument, not in -o options. They're specific to gtm_ctl + * reconnect. + * + * + */ +static void +do_reconnect(void) +{ + pgpid_t pid; + char *reconnect_point_file_nam; + FILE *reconnect_point_file; + + pid = get_pgpid(); + + if (pid == 0) /* no pid file */ + { + write_stderr(_("%s: PID file \"%s\" does not exist\n"), progname, pid_file); + write_stderr(_("Is server running?\n")); + exit(1); + } + else if (pid < 0) /* standalone backend, not gtm */ + { + pid = -pid; + write_stderr(_("%s: cannot promote server; " + "single-user server is running (PID: %ld)\n"), + progname, pid); + exit(1); + } + read_gtm_opts(); + reconnect_point_file_nam = malloc(strlen(gtm_data) + 8); + if (reconnect_point_file_nam == NULL) + { + write_stderr(_("%s: No memory available.\n"), progname); + exit(1); + } + snprintf(reconnect_point_file_nam, strlen(gtm_data) + 7, "%s/newgtm", gtm_data); + reconnect_point_file = fopen(reconnect_point_file_nam, "w"); + if (reconnect_point_file == NULL) + { + write_stderr(_("%s: Cannot open reconnect point file %s\n"), progname, reconnect_point_file_nam); + exit(1); + } + fprintf(reconnect_point_file, "%s", gtm_opts); + fclose(reconnect_point_file); + free(reconnect_point_file_nam); + /* + * Beofore signaling, we need to set the host and port of the new target GTM. + * + * They should be written to "newgtm" file under -D directory. + * First line is the host name and the second line is port (all in + * text representation). 
+ */ + /* === WIP 20110408 === */ + if (kill((pid_t) pid, SIGUSR2) != 0) + { + write_stderr(_("%s: could not send promote signal (PID: %ld): %s\n"), progname, pid, + strerror(errno)); + exit(1); + } +} + /* * restart/reload routines @@ -969,9 +1044,12 @@ main(int argc, char **argv) ctl_command = RESTART_COMMAND; else if (strcmp(argv[optind], "status") == 0) ctl_command = STATUS_COMMAND; + else if (strcmp(argv[optind], "reconnect") == 0) + ctl_command = RECONNECT_COMMAND; else { - write_stderr(_("%s: unrecognized operation mode \"%s\"\n"), progname, argv[optind]); + write_stderr(_("%s: unrecognized operation mode \"%s\"\n"), + progname, argv[optind]); do_advice(); exit(1); } @@ -1072,6 +1150,9 @@ main(int argc, char **argv) case STATUS_COMMAND: do_status(); break; + case RECONNECT_COMMAND: + do_reconnect(); + break; default: break; } ----------------------------------------------------------------------- Summary of changes: src/gtm/gtm_ctl/gtm_ctl.c | 83 ++++++++++++++++++++++++++++++++++++++++++++- 1 files changed, 82 insertions(+), 1 deletions(-) hooks/post-receive -- Postgres-XC |
From: Ashutosh B. <ash...@en...> - 2011-04-08 07:04:28
|
Before I forget, Koichi, your sourceforge address bounced, so my mail only reached Mason. Thanks Mason for including others in the thread again. On Fri, Apr 8, 2011 at 6:04 AM, Koichi Suzuki <koi...@gm...> wrote: > Thank you for the valuable advice. We should also think about how GROUP BY can > be pushed down to the datanodes so that the coordinator can simply merge the > results. > If I understand it right, Mason has already given a solution to that problem. We push the GROUP BY down to the datanodes with an additional ORDER BY clause (ordering based on the GROUP BY expressions) on top of them (this looks tricky if there are already other ORDER BY clauses). Thus the data we collect at the coordinator is already grouped per datanode, and all the coordinator has to do is consolidate the rows from the data nodes in order, like we do in a merge sort. I see that we may have to do this in steps. 1. first cut - apply GROUP BY only at the coordinator. Not so efficient, but it will make GROUP BY work. 2. second cut - push GROUP BY down to the data nodes; better than the first cut, but the grouping at the coordinator is still not that efficient. 3. third cut - implement the above idea fully. We might be able to do 1 and 2 in the first cut itself, but it is too early to say anything. I will get back once I know things better. Mason has also pointed to the possibility of distributing the final grouping phase at the coordinator across the datanodes (in the third cut) so that the coordinator is not loaded if there are too many groups in the GROUP BY. But that requires the infrastructure to ship rows from the coordinator to the datanodes. This infrastructure is not in place, I think. So that is a far possibility for now. > > ---------- > Koichi Suzuki > > > > 2011/4/7 Mason <ma...@us...>: > > I looked at the schedule. > > > > I am not sure about the planned design for GROUP BY, but originally > > Andrei was planning on making it somewhat similar to ORDER BY, where > > ORDER BY does a merge sort on the coordinator based on sorted results > > from the data nodes. Each data node could do the beginning phase of > > aggregation in groups and then sort the output in the same order as > > the groups. Then the coordinator could do the last step of > > aggregation on like groups, which is easy to do on the fly > > because of the sorted input coming from the data nodes (thus avoiding > > materialization). > > > > This should work pretty well. One drawback is if the user chooses a GROUP > > BY clause with many groups (many = thousands+). Then some parallelism > > is lost because the final phase is done in only one place, on > > the Coordinator. GridSQL spreads out the final aggregation phase > > amongst all the data nodes, moving like groups to the same node to get > > more parallelism. I think the row shipping infrastructure might have to be > > in place first before implementing that, and there will be a > > noticeable benefit only once there are many, many groups, so I don't > > see it as a critical thing at this phase; it can be added later. > > > > Regards, > > > > Mason > > > > > > > > On Thu, Apr 7, 2011 at 5:27 AM, Koichi Suzuki > > <koi...@us...> wrote: > >> Project "Postgres-XC documentation". > >> > >> The branch, master has been updated > >> via 62434399fdd57aff2701e3e5e97fed619f6d6820 (commit) > >> from 252519c2be5309a3682b0ee895cf040083ae1784 (commit) > >> > >> > >> - Log ----------------------------------------------------------------- > >> commit 62434399fdd57aff2701e3e5e97fed619f6d6820 > >> Author: Koichi Suzuki <koi...@gm...> > >> Date: Thu Apr 7 18:27:26 2011 +0900 > >> > >> 1. Added 2011FYQ1 schedule for each member. > >> 2. Modified my progress sheet of Reference Manual. > >> > >> -- Koichi Suzuki > >> > >> diff --git a/progress/2011FYQ1_Schedule.ods b/progress/2011FYQ1_Schedule.ods > >> new file mode 100755 > >> index 0000000..5e24d37 > >> Binary files /dev/null and b/progress/2011FYQ1_Schedule.ods differ > >> diff --git a/progress/documentation-progress.ods b/progress/documentation-progress.ods > >> index 277aade..2c8577e 100644 > >> Binary files a/progress/documentation-progress.ods and b/progress/documentation-progress.ods differ > >> > >> ----------------------------------------------------------------------- > >> > >> Summary of changes: > >> progress/2011FYQ1_Schedule.ods | Bin 0 -> 22147 bytes > >> progress/documentation-progress.ods | Bin 16883 -> 19519 bytes > >> 2 files changed, 0 insertions(+), 0 deletions(-) > >> create mode 100755 progress/2011FYQ1_Schedule.ods > >> > >> > >> hooks/post-receive > >> -- > >> Postgres-XC documentation > >> > >> _______________________________________________ > >> Postgres-xc-committers mailing list > >> Pos...@li... > >> https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers > >> > > > > _______________________________________________ > > Postgres-xc-committers mailing list > > Pos...@li... > > https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers > > > -- Best Wishes, Ashutosh Bapat EnterpriseDB Corporation The Enterprise Postgres Company |
From: Koichi S. <koi...@us...> - 2011-04-08 05:41:29
|
Project "Postgres-XC documentation". The branch, master has been updated via 4c8c9a83834b9513848c2040596a5d310d432e57 (commit) from 62434399fdd57aff2701e3e5e97fed619f6d6820 (commit) - Log ----------------------------------------------------------------- commit 4c8c9a83834b9513848c2040596a5d310d432e57 Author: Koichi Suzuki <koi...@gm...> Date: Fri Apr 8 14:39:52 2011 +0900 Changed Koichi's schedule for May and June. Now preparation of Reference Manual will be done in the following order: 1) Vol.3: Server Administration Guide 2) Vol.1B: SQL Command Reference 3) Vol.1A: SQL Language 4) Vol.2: Programming Guide diff --git a/progress/2011FYQ1_Schedule.ods b/progress/2011FYQ1_Schedule.ods index 5e24d37..d97af84 100755 Binary files a/progress/2011FYQ1_Schedule.ods and b/progress/2011FYQ1_Schedule.ods differ ----------------------------------------------------------------------- Summary of changes: progress/2011FYQ1_Schedule.ods | Bin 22147 -> 22246 bytes 1 files changed, 0 insertions(+), 0 deletions(-) hooks/post-receive -- Postgres-XC documentation |
From: Koichi S. <koi...@gm...> - 2011-04-08 00:34:51
|
Thank you for the valuable advice. We should also think about how GROUP BY can be pushed down to the datanodes so that the coordinator can simply merge the results. ---------- Koichi Suzuki 2011/4/7 Mason <ma...@us...>: > I looked at the schedule. > > I am not sure about the planned design for GROUP BY, but originally > Andrei was planning on making it somewhat similar to ORDER BY, where > ORDER BY does a merge sort on the coordinator based on sorted results > from the data nodes. Each data node could do the beginning phase of > aggregation in groups and then sort the output in the same order as > the groups. Then the coordinator could do the last step of > aggregation on like groups, which is easy to do on the fly > because of the sorted input coming from the data nodes (thus avoiding > materialization). > > This should work pretty well. One drawback is if the user chooses a GROUP > BY clause with many groups (many = thousands+). Then some parallelism > is lost because the final phase is done in only one place, on > the Coordinator. GridSQL spreads out the final aggregation phase > amongst all the data nodes, moving like groups to the same node to get > more parallelism. I think the row shipping infrastructure might have to be > in place first before implementing that, and there will be a > noticeable benefit only once there are many, many groups, so I don't > see it as a critical thing at this phase; it can be added later. > > Regards, > > Mason > > > > On Thu, Apr 7, 2011 at 5:27 AM, Koichi Suzuki > <koi...@us...> wrote: >> Project "Postgres-XC documentation". >> >> The branch, master has been updated >> via 62434399fdd57aff2701e3e5e97fed619f6d6820 (commit) >> from 252519c2be5309a3682b0ee895cf040083ae1784 (commit) >> >> >> - Log ----------------------------------------------------------------- >> commit 62434399fdd57aff2701e3e5e97fed619f6d6820 >> Author: Koichi Suzuki <koi...@gm...> >> Date: Thu Apr 7 18:27:26 2011 +0900 >> >> 1. Added 2011FYQ1 schedule for each member. >> 2. Modified my progress sheet of Reference Manual. >> >> -- Koichi Suzuki >> >> diff --git a/progress/2011FYQ1_Schedule.ods b/progress/2011FYQ1_Schedule.ods >> new file mode 100755 >> index 0000000..5e24d37 >> Binary files /dev/null and b/progress/2011FYQ1_Schedule.ods differ >> diff --git a/progress/documentation-progress.ods b/progress/documentation-progress.ods >> index 277aade..2c8577e 100644 >> Binary files a/progress/documentation-progress.ods and b/progress/documentation-progress.ods differ >> >> ----------------------------------------------------------------------- >> >> Summary of changes: >> progress/2011FYQ1_Schedule.ods | Bin 0 -> 22147 bytes >> progress/documentation-progress.ods | Bin 16883 -> 19519 bytes >> 2 files changed, 0 insertions(+), 0 deletions(-) >> create mode 100755 progress/2011FYQ1_Schedule.ods >> >> hooks/post-receive >> -- >> Postgres-XC documentation >> >> _______________________________________________ >> Postgres-xc-committers mailing list >> Pos...@li... >> https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers >> > |
From: Mason <ma...@us...> - 2011-04-07 12:58:59
|
I looked at the schedule. I am not sure about the planned design for GROUP BY, but originally Andrei was planning on making it somewhat similar to ORDER BY, where ORDER BY does a merge sort on the coordinator based on sorted results from the data nodes. Each data node could do the beginning phase of aggregation in groups and then sort the output in the same order as the groups. Then the coordinator could do the last step of aggregation on like groups, which is easy to do on the fly because of the sorted input coming from the data nodes (thus avoiding materialization). This should work pretty well. One drawback is if the user chooses a GROUP BY clause with many groups (many = thousands+). Then some parallelism is lost because the final phase is done in only one place, on the Coordinator. GridSQL spreads out the final aggregation phase amongst all the data nodes, moving like groups to the same node to get more parallelism. I think the row shipping infrastructure might have to be in place first before implementing that, and there will be a noticeable benefit only once there are many, many groups, so I don't see it as a critical thing at this phase; it can be added later. Regards, Mason On Thu, Apr 7, 2011 at 5:27 AM, Koichi Suzuki <koi...@us...> wrote: > Project "Postgres-XC documentation". > > The branch, master has been updated > via 62434399fdd57aff2701e3e5e97fed619f6d6820 (commit) > from 252519c2be5309a3682b0ee895cf040083ae1784 (commit) > > > - Log ----------------------------------------------------------------- > commit 62434399fdd57aff2701e3e5e97fed619f6d6820 > Author: Koichi Suzuki <koi...@gm...> > Date: Thu Apr 7 18:27:26 2011 +0900 > > 1. Added 2011FYQ1 schedule for each member. > 2. Modified my progress sheet of Reference Manual. > > -- Koichi Suzuki > > diff --git a/progress/2011FYQ1_Schedule.ods b/progress/2011FYQ1_Schedule.ods > new file mode 100755 > index 0000000..5e24d37 > Binary files /dev/null and b/progress/2011FYQ1_Schedule.ods differ > diff --git a/progress/documentation-progress.ods b/progress/documentation-progress.ods > index 277aade..2c8577e 100644 > Binary files a/progress/documentation-progress.ods and b/progress/documentation-progress.ods differ > > ----------------------------------------------------------------------- > > Summary of changes: > progress/2011FYQ1_Schedule.ods | Bin 0 -> 22147 bytes > progress/documentation-progress.ods | Bin 16883 -> 19519 bytes > 2 files changed, 0 insertions(+), 0 deletions(-) > create mode 100755 progress/2011FYQ1_Schedule.ods > > hooks/post-receive > -- > Postgres-XC documentation > > _______________________________________________ > Postgres-xc-committers mailing list > Pos...@li... > https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers > |
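As a concrete illustration of the two-phase aggregation Mason describes: an aggregate such as avg() cannot be finished on the datanodes, because an average of averages is wrong in general; each node instead returns the transition state, and the coordinator combines it. A minimal SQL sketch, reusing the hypothetical sales table shown earlier in this thread (the rewritten form is an assumption, not actual planner output):

    -- query received by the coordinator
    SELECT region, avg(amount) FROM sales GROUP BY region;

    -- beginning phase on each datanode: the partial state per group, sorted
    SELECT region, sum(amount) AS s, count(amount) AS c
    FROM sales GROUP BY region ORDER BY region;

    -- last step at the coordinator, merging like groups from the sorted
    -- streams on the fly: the avg for a group is sum(s) / sum(c) over
    -- that group's partial rows from all datanodes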
From: Abbas B. <ga...@us...> - 2011-04-07 11:36:34
|
Project "Postgres-XC". The branch, master has been updated via 219e41d7324c86ceb537df13214774b70051a833 (commit) from 3e585734f4fa26b6356529f51d7a478871df0301 (commit) - Log ----------------------------------------------------------------- commit 219e41d7324c86ceb537df13214774b70051a833 Author: Abbas <abb...@en...> Date: Thu Apr 7 16:29:05 2011 +0500 This patch fixes bug ID 3170712 There was a server crash in case of multiple inserts in a table on which rules are defined. Patch has been written by Benny. diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 78850bb..da1af1a 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -1716,8 +1716,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events) /* ... and the VALUES expression lists */ rewriteValuesRTE(values_rte, rt_entry_relation, attrnos); #ifdef PGXC - if (IS_PGXC_COORDINATOR && - list_length(values_rte->values_lists) > 1) + if (IS_PGXC_COORDINATOR) parsetree_list = RewriteInsertStmt(parsetree, values_rte); #endif } @@ -1845,14 +1844,12 @@ RewriteQuery(Query *parsetree, List *rewrite_events) query = (Query *)lfirst(pt_cell); - locks = matchLocks(event, rt_entry_relation->rd_rules, - result_relation, query); - /* * Collect and apply the appropriate rules. */ locks = matchLocks(event, rt_entry_relation->rd_rules, - result_relation, parsetree); + result_relation, query); + if (locks != NIL) { List *product_queries = NIL; @@ -1976,42 +1973,37 @@ RewriteQuery(Query *parsetree, List *rewrite_events) } else { - int query_no = -1; - + int query_no = 0; + foreach(pt_cell, parsetree_list) { - Query *query; - Query *qual; - query_no ++; + Query *query = NULL; + Query *qual = NULL; query = (Query *)lfirst(pt_cell); if (!instead) { + if (qual_product_list) + qual = (Query *)list_nth(qual_product_list, + query_no); + if (query->commandType == CMD_INSERT) { - if (qual_product_list != NIL) - { - qual = (Query *)list_nth(qual_product_list, - query_no); + if (qual != NULL) rewritten = lcons(qual, rewritten); - } else rewritten = lcons(query, rewritten); } - - } - else - { - if (qual_product_list != NIL) + else { - qual = (Query *)list_nth(qual_product_list, - query_no); - rewritten = lcons(qual, rewritten); + if (qual != NULL) + rewritten = lappend(rewritten, qual); + else + rewritten = lappend(rewritten, query); } - else - rewritten = lappend(rewritten, query); } + query_no++; } } #endif @@ -2291,6 +2283,9 @@ ProcessRobinValue(Oid relid, List **valuesList, * 3.DEFAULT: no need to process (replicate case) * * values_rte is the values list range table. + * + * note: sql_statement of query with mutiple values must be assigned in this function. + * It will not be assigned in pgxc_planner again. */ static List * RewriteInsertStmt(Query *query, RangeTblEntry *values_rte) @@ -2390,7 +2385,9 @@ collect: DestroyValuesList(&valuesList); break; - default: /* distribute by replication: just do it as usual */ + default: + get_query_def_from_valuesList(query, &buf); + query->sql_statement = pstrdup(buf.data); rwInsertList = lappend(rwInsertList, query); break; } ----------------------------------------------------------------------- Summary of changes: src/backend/rewrite/rewriteHandler.c | 51 ++++++++++++++++------------------ 1 files changed, 24 insertions(+), 27 deletions(-) hooks/post-receive -- Postgres-XC |
From: Koichi S. <koi...@us...> - 2011-04-07 09:27:53
|
Project "Postgres-XC documentation". The branch, master has been updated via 62434399fdd57aff2701e3e5e97fed619f6d6820 (commit) from 252519c2be5309a3682b0ee895cf040083ae1784 (commit) - Log ----------------------------------------------------------------- commit 62434399fdd57aff2701e3e5e97fed619f6d6820 Author: Koichi Suzuki <koi...@gm...> Date: Thu Apr 7 18:27:26 2011 +0900 1. Added 2011FYQ1 schedule for each member. 2. Modified my progress sheet of Reference Manual. -- Koichi Suzuki diff --git a/progress/2011FYQ1_Schedule.ods b/progress/2011FYQ1_Schedule.ods new file mode 100755 index 0000000..5e24d37 Binary files /dev/null and b/progress/2011FYQ1_Schedule.ods differ diff --git a/progress/documentation-progress.ods b/progress/documentation-progress.ods index 277aade..2c8577e 100644 Binary files a/progress/documentation-progress.ods and b/progress/documentation-progress.ods differ ----------------------------------------------------------------------- Summary of changes: progress/2011FYQ1_Schedule.ods | Bin 0 -> 22147 bytes progress/documentation-progress.ods | Bin 16883 -> 19519 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100755 progress/2011FYQ1_Schedule.ods hooks/post-receive -- Postgres-XC documentation |
From: Michael P. <mic...@us...> - 2011-04-07 08:24:48
|
Project "Postgres-XC". The branch, master has been updated via 3e585734f4fa26b6356529f51d7a478871df0301 (commit) from 8b018fd20850ec0753fdfbef024b9a957efaeb0a (commit) - Log ----------------------------------------------------------------- commit 3e585734f4fa26b6356529f51d7a478871df0301 Author: Michael P <mic...@us...> Date: Thu Apr 7 17:07:33 2011 +0900 Fix for regression test temp TEMP tables are not supported yet, but it has been checked that usage of whereami and whoami is correct if search_path is set to a different value. diff --git a/src/test/regress/expected/temp_1.out b/src/test/regress/expected/temp_1.out index b966fb3..ef48e27 100644 --- a/src/test/regress/expected/temp_1.out +++ b/src/test/regress/expected/temp_1.out @@ -150,11 +150,12 @@ create function public.whoami() returns text create function pg_temp.whoami() returns text as $$select 'temp'::text$$ language sql; -- default should have pg_temp implicitly first, but only for tables -select * from whereami; - f1 ------- +select * from whereami order by f1; + f1 +-------- + public temp -(1 row) +(2 rows) select whoami(); whoami @@ -164,11 +165,12 @@ select whoami(); -- can list temp first explicitly, but it still doesn't affect functions set search_path = pg_temp, public; -select * from whereami; - f1 ------- +select * from whereami order by f1; + f1 +-------- + public temp -(1 row) +(2 rows) select whoami(); whoami @@ -178,11 +180,12 @@ select whoami(); -- or put it last for security set search_path = public, pg_temp; -select * from whereami; +select * from whereami order by f1; f1 -------- public -(1 row) + temp +(2 rows) select whoami(); whoami diff --git a/src/test/regress/sql/temp.sql b/src/test/regress/sql/temp.sql index aed4be8..66b12ae 100644 --- a/src/test/regress/sql/temp.sql +++ b/src/test/regress/sql/temp.sql @@ -134,17 +134,17 @@ create function pg_temp.whoami() returns text as $$select 'temp'::text$$ language sql; -- default should have pg_temp implicitly first, but only for tables -select * from whereami; +select * from whereami order by f1; select whoami(); -- can list temp first explicitly, but it still doesn't affect functions set search_path = pg_temp, public; -select * from whereami; +select * from whereami order by f1; select whoami(); -- or put it last for security set search_path = public, pg_temp; -select * from whereami; +select * from whereami order by f1; select whoami(); -- you can invoke a temp function explicitly, though ----------------------------------------------------------------------- Summary of changes: src/test/regress/expected/temp_1.out | 23 +++++++++++++---------- src/test/regress/sql/temp.sql | 6 +++--- 2 files changed, 16 insertions(+), 13 deletions(-) hooks/post-receive -- Postgres-XC |
From: Koichi S. <koi...@us...> - 2011-04-07 07:36:00
|
Project "Postgres-XC". The branch, documentation has been updated via 0222ee52bb3fd9bedc71ee86169ad5009137fecc (commit) from 180361060615c52e211244be7d5939c4a5edc0c4 (commit) - Log ----------------------------------------------------------------- commit 0222ee52bb3fd9bedc71ee86169ad5009137fecc Author: Koichi Suzuki <koi...@gm...> Date: Thu Apr 7 16:32:42 2011 +0900 This is very important commit of Reference Manual build. Now all the SGML files are generaged from *.sgmlin files, which includes all the contents common to all, specific to PostgreSQL, specific to Postgres-XC, and specific translations. These contents will be filtered by makesgml command included in this repository for specific usage, for example, English version of Postgres-XC, Japanese version of PostgreSQL, etc. You will see all the *.sgml file is now *.sgmlin file, which is filtered to *.sgml through new Makefile. diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile index a7f0c8d..5bf5fce 100644 --- a/doc/src/sgml/Makefile +++ b/doc/src/sgml/Makefile @@ -2,7 +2,7 @@ # # PostgreSQL documentation makefile # -# doc/src/sgml/Makefile +# $PostgreSQL: pgsql/doc/src/sgml/Makefile,v 1.148 2010/06/12 21:40:31 tgl Exp $ # #---------------------------------------------------------------------------- @@ -54,7 +54,13 @@ override XSLTPROCFLAGS += --stringparam pg.version '$(VERSION)' GENERATED_SGML = bookindex.sgml version.sgml \ features-supported.sgml features-unsupported.sgml -ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML) +ALLSGMLIN := $(wildcard $(srcdir)/*.sgmlin $(srcdir)/ref/*.sgmlin) + +#ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML) +ALLSGML := $(ALLSGMLIN:%sgmlin=%sgml) $(GENERATED_SGML) + +ALLSGMLTOREMOVE := $(ALLSGMLIN:%sgmlin=%sgml) + # Sometimes we don't want this one. ALMOSTALLSGML := $(filter-out %bookindex.sgml,$(ALLSGML)) @@ -190,6 +196,13 @@ postgres.pdf: # Cancel built-in suffix rules, interfering with PS building .SUFFIXES: +.SUFFIXES: .sgml .sgmlin + +INC_LIST = -I PG -I EN +#INC_LIST = -I PGXC -I EN + +.sgmlin.sgml: + makesgml -i $< -o $@ $(INC_LIST) $(EXC_LIST) # This generates an XML version of the flow-object tree. 
It's useful @@ -334,11 +347,16 @@ fixed-man-stamp: man-stamp install-man: cp -R $(foreach dir,man1 man3 man$(sqlmansectnum),fixedman/$(dir)) '$(DESTDIR)$(mandir)' -clean: clean-man +clean: clean-man clean-sgml + .PHONY: clean-man clean-man: rm -rf fixedman/ fixed-man-stamp +.PHONY: clean-sgml: +clean-sgml: + rm -rf $(ALLSGML) + endif # sqlmansectnum != 7 # tabs are harmless, but it is best to avoid them in SGML files @@ -369,6 +387,8 @@ clean: rm -f postgres.xml postgres.xmltmp htmlhelp.hhp toc.hhc index.hhk *.fo # Texinfo rm -f *.texixml *.texi *.info db2texi.refs +# sgml + rm -f $(ALLSGMLTOREMOVE) distclean: clean @@ -377,3 +397,11 @@ maintainer-clean: distclean rm -fr html/ html-stamp # man rm -rf man1/ man3/ man7/ man-stamp + +.PHONY: sgmlfiles + +INC_LIST = -I XC -I EN +EXC_LIST = -E PG -E JP + +sgmlfiles: + ./makesgmlfiles $(INC_LIST) $(EXC_LIST) diff --git a/doc/src/sgml/README.links b/doc/src/sgml/README.links index 27b429a..1991c20 100644 --- a/doc/src/sgml/README.links +++ b/doc/src/sgml/README.links @@ -1,4 +1,4 @@ -<!-- doc/src/sgml/README.links +<!-- $PostgreSQL: pgsql/doc/src/sgml/README.links,v 1.3 2009/01/10 16:58:39 momjian Exp $ --> Linking within SGML documents can be confusing, so here is a summary: diff --git a/doc/src/sgml/acronyms.sgml b/doc/src/sgml/acronyms.sgml deleted file mode 100644 index 2cd5641..0000000 --- a/doc/src/sgml/acronyms.sgml +++ /dev/null @@ -1,748 +0,0 @@ -<!-- doc/src/sgml/acronyms.sgml - -<appendix id="acronyms"> - <title>Acronyms</title> - - <para> - This is a list of acronyms commonly used in the <productname>PostgreSQL</> - documentation and in discussions about <productname>PostgreSQL</>. - - <variablelist> - - <varlistentry> - <term><acronym>ANSI</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/American_National_Standards_Institute"> - American National Standards Institute</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>API</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/API">Application Programming Interface</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>ASCII</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Ascii">American Standard - Code for Information Interchange</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>BKI</acronym></term> - <listitem> - <para> - <link linkend="bki">Backend Interface</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>CA</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Certificate_authority">Certificate Authority</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>CIDR</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Classless - Inter-Domain Routing</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>CPAN</acronym></term> - <listitem> - <para> - <ulink url="http://www.cpan.org/">Comprehensive Perl Archive Network</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>CRL</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Certificate_revocation_list">Certificate - Revocation List</ulink> - 
</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>CSV</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Comma-separated_values">Comma - Separated Values</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>CTE</acronym></term> - <listitem> - <para> - <link linkend="queries-with">Common Table Expression</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>CVE</acronym></term> - <listitem> - <para> - <ulink url="http://cve.mitre.org/">Common Vulnerabilities and Exposures</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>DBA</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Database_administrator">Database - Administrator</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>DBI</acronym></term> - <listitem> - <para> - <ulink url="http://dbi.perl.org/">Database Interface (Perl)</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>DBMS</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Dbms">Database Management - System</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>DDL</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Data_Definition_Language">Data - Definition Language</ulink>, SQL commands such as <command>CREATE - TABLE</>, <command>ALTER USER</> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>DML</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Data_Manipulation_Language">Data - Manipulation Language</ulink>, SQL commands such as <command>INSERT</>, - <command>UPDATE</>, <command>DELETE</> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>DST</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Daylight_saving_time">Daylight - Saving Time</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>ECPG</acronym></term> - <listitem> - <para> - <link linkend="ecpg">Embedded C for PostgreSQL</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>ESQL</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Embedded_SQL">Embedded - SQL</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>FAQ</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/FAQ">Frequently Asked - Questions</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>FSM</acronym></term> - <listitem> - <para> - <link linkend="storage-fsm">Free Space Map</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>GEQO</acronym></term> - <listitem> - <para> - <link linkend="geqo">Genetic Query Optimizer</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>GIN</acronym></term> - <listitem> - <para> - <link linkend="GIN">Generalized Inverted Index</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>GiST</acronym></term> - <listitem> - <para> - <link linkend="GiST">Generalized 
Search Tree</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>Git</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Git_(software)">Git</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>GMT</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/GMT">Greenwich Mean Time</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>GSSAPI</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Generic_Security_Services_Application_Program_Interface">Generic - Security Services Application Programming Interface</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>GUC</acronym></term> - <listitem> - <para> - <link linkend="config-setting">Grand Unified Configuration</link>, - the <productname>PostgreSQL</> subsystem that handles server configuration - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>HBA</acronym></term> - <listitem> - <para> - <link linkend="auth-pg-hba-conf">Host-Based Authentication</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>HOT</acronym></term> - <listitem> - <para> - <ulink - url="http://git.postgresql.org/gitweb?p=postgresql.git;a=blob;f=src/backend/access/heap/README.HOT;hb=HEAD">Heap-Only - Tuples</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>IEC</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/International_Electrotechnical_Commission">International - Electrotechnical Commission</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>IEEE</acronym></term> - <listitem> - <para> - <ulink url="http://standards.ieee.org/">Institute of Electrical and - Electronics Engineers</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>IPC</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Inter-process_communication">Inter-Process - Communication</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>ISO</acronym></term> - <listitem> - <para> - <ulink url="http://www.iso.org/iso/home.htm">International Organization for - Standardization</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>ISSN</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Issn">International Standard - Serial Number</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>JDBC</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Java_Database_Connectivity">Java - Database Connectivity</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>LDAP</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol">Lightweight - Directory Access Protocol</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>MSVC</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Visual_C++"><productname>Microsoft - Visual 
C</productname></ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>MVCC</acronym></term> - <listitem> - <para> - <link linkend="mvcc">Multi-Version Concurrency Control</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>NLS</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Internationalization_and_localization">National - Language Support</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>ODBC</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Open_Database_Connectivity">Open - Database Connectivity</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>OID</acronym></term> - <listitem> - <para> - <link linkend="datatype-oid">Object Identifier</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>OLAP</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Olap">Online Analytical - Processing</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>OLTP</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/OLTP">Online Transaction - Processing</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>ORDBMS</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/ORDBMS">Object-Relational - Database Management System</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>PAM</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Pluggable_Authentication_Modules">Pluggable - Authentication Modules</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>PGSQL</acronym></term> - <listitem> - <para> - <link linkend="postgres"><productname>PostgreSQL</></link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>PGXS</acronym></term> - <listitem> - <para> - <link linkend="xfunc-c-pgxs"><productname>PostgreSQL</> Extension System</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>PID</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Process_identifier">Process Identifier</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>PITR</acronym></term> - <listitem> - <para> - <link linkend="continuous-archiving">Point-In-Time - Recovery</link> (Continuous Archiving) - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>PL</acronym></term> - <listitem> - <para> - <link linkend="server-programming">Programming Languages (server-side)</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>POSIX</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/POSIX">Portable Operating - System Interface</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>RDBMS</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Relational_database_management_system">Relational - Database Management System</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>RFC</acronym></term> - <listitem> - 
<para> - <ulink - url="http://en.wikipedia.org/wiki/Request_for_Comments">Request For - Comments</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SGML</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/SGML">Standard Generalized - Markup Language</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SPI</acronym></term> - <listitem> - <para> - <link linkend="spi">Server Programming Interface</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SQL</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/SQL">Structured Query Language</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SRF</acronym></term> - <listitem> - <para> - <link linkend="xfunc-c-return-set">Set-Returning Function</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SSH</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Secure_Shell">Secure - Shell</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SSL</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Secure_Sockets_Layer">Secure Sockets Layer</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SSPI</acronym></term> - <listitem> - <para> - <ulink url="http://msdn.microsoft.com/en-us/library/aa380493%28VS.85%29.aspx">Security - Support Provider Interface</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>SYSV</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/System_V">Unix System V</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>TCP/IP</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Transmission_Control_Protocol">Transmission - Control Protocol (TCP) / Internet Protocol (IP)</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>TID</acronym></term> - <listitem> - <para> - <link linkend="datatype-oid">Tuple Identifier</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>TOAST</acronym></term> - <listitem> - <para> - <link linkend="storage-toast">The Oversized-Attribute Storage Technique</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>TPC</acronym></term> - <listitem> - <para> - <ulink url="http://www.tpc.org/">Transaction Processing - Performance Council</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>URL</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/URL">Uniform Resource - Locator</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>UTC</acronym></term> - <listitem> - <para> - <ulink - url="http://en.wikipedia.org/wiki/Coordinated_Universal_Time">Coordinated - Universal Time</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>UTF</acronym></term> - <listitem> - <para> - <ulink url="http://www.unicode.org/">Unicode Transformation - Format</ulink> - </para> - </listitem> - </varlistentry> 
- - <varlistentry> - <term><acronym>UTF8</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/Utf8">Eight-Bit Unicode - Transformation Format</ulink> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>UUID</acronym></term> - <listitem> - <para> - <link linkend="datatype-uuid">Universally Unique Identifier</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>WAL</acronym></term> - <listitem> - <para> - <link linkend="wal">Write-Ahead Log</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>XID</acronym></term> - <listitem> - <para> - <link linkend="datatype-oid">Transaction Identifier</link> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><acronym>XML</acronym></term> - <listitem> - <para> - <ulink url="http://en.wikipedia.org/wiki/XML">Extensible Markup - Language</ulink> - </para> - </listitem> - </varlistentry> - - </variablelist> - </para> - -</appendix> diff --git a/doc/src/sgml/acronyms.sgmlin b/doc/src/sgml/acronyms.sgmlin new file mode 100644 index 0000000..de4e1e0 --- /dev/null +++ b/doc/src/sgml/acronyms.sgmlin @@ -0,0 +1,748 @@ +<!-- $PostgreSQL: pgsql/doc/src/sgml/acronyms.sgml,v 1.8 2010/03/17 17:12:31 petere Exp $ --> + +<appendix id="acronyms"> + <title>Acronyms</title> + + <para> + This is a list of acronyms commonly used in the <productname>PostgreSQL</> + documentation and in discussions about <productname>PostgreSQL</>. + + <variablelist> + + <varlistentry> + <term><acronym>ANSI</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/American_National_Standards_Institute"> + American National Standards Institute</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>API</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/API">Application Programming Interface</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>ASCII</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Ascii">American Standard + Code for Information Interchange</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>BKI</acronym></term> + <listitem> + <para> + <link linkend="bki">Backend Interface</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>CA</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Certificate_authority">Certificate Authority</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>CIDR</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Classless + Inter-Domain Routing</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>CPAN</acronym></term> + <listitem> + <para> + <ulink url="http://www.cpan.org/">Comprehensive Perl Archive Network</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>CRL</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Certificate_revocation_list">Certificate + Revocation List</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>CSV</acronym></term> + <listitem> + <para> + 
<ulink + url="http://en.wikipedia.org/wiki/Comma-separated_values">Comma + Separated Values</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>CTE</acronym></term> + <listitem> + <para> + <link linkend="queries-with">Common Table Expression</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>CVE</acronym></term> + <listitem> + <para> + <ulink url="http://cve.mitre.org/">Common Vulnerabilities and Exposures</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>DBA</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Database_administrator">Database + Administrator</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>DBI</acronym></term> + <listitem> + <para> + <ulink url="http://dbi.perl.org/">Database Interface (Perl)</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>DBMS</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Dbms">Database Management + System</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>DDL</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Data_Definition_Language">Data + Definition Language</ulink>, SQL commands such as <command>CREATE + TABLE</>, <command>ALTER USER</> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>DML</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Data_Manipulation_Language">Data + Manipulation Language</ulink>, SQL commands such as <command>INSERT</>, + <command>UPDATE</>, <command>DELETE</> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>DST</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Daylight_saving_time">Daylight + Saving Time</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>ECPG</acronym></term> + <listitem> + <para> + <link linkend="ecpg">Embedded C for PostgreSQL</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>ESQL</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Embedded_SQL">Embedded + SQL</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>FAQ</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/FAQ">Frequently Asked + Questions</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>FSM</acronym></term> + <listitem> + <para> + <link linkend="storage-fsm">Free Space Map</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>GEQO</acronym></term> + <listitem> + <para> + <link linkend="geqo">Genetic Query Optimizer</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>GIN</acronym></term> + <listitem> + <para> + <link linkend="GIN">Generalized Inverted Index</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>GiST</acronym></term> + <listitem> + <para> + <link linkend="GiST">Generalized Search Tree</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>Git</acronym></term> + 
<listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Git_(software)">Git</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>GMT</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/GMT">Greenwich Mean Time</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>GSSAPI</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Generic_Security_Services_Application_Program_Interface">Generic + Security Services Application Programming Interface</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>GUC</acronym></term> + <listitem> + <para> + <link linkend="config-setting">Grand Unified Configuration</link>, + the <productname>PostgreSQL</> subsystem that handles server configuration + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>HBA</acronym></term> + <listitem> + <para> + <link linkend="auth-pg-hba-conf">Host-Based Authentication</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>HOT</acronym></term> + <listitem> + <para> + <ulink + url="http://git.postgresql.org/gitweb?p=postgresql.git;a=blob;f=src/backend/access/heap/README.HOT;hb=HEAD">Heap-Only + Tuples</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>IEC</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/International_Electrotechnical_Commission">International + Electrotechnical Commission</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>IEEE</acronym></term> + <listitem> + <para> + <ulink url="http://standards.ieee.org/">Institute of Electrical and + Electronics Engineers</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>IPC</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Inter-process_communication">Inter-Process + Communication</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>ISO</acronym></term> + <listitem> + <para> + <ulink url="http://www.iso.org/iso/home.htm">International Organization for + Standardization</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>ISSN</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Issn">International Standard + Serial Number</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>JDBC</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Java_Database_Connectivity">Java + Database Connectivity</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>LDAP</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol">Lightweight + Directory Access Protocol</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>MSVC</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Visual_C++"><productname>Microsoft + Visual C</productname></ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>MVCC</acronym></term> 
+ <listitem> + <para> + <link linkend="mvcc">Multi-Version Concurrency Control</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>NLS</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Internationalization_and_localization">National + Language Support</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>ODBC</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Open_Database_Connectivity">Open + Database Connectivity</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>OID</acronym></term> + <listitem> + <para> + <link linkend="datatype-oid">Object Identifier</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>OLAP</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Olap">Online Analytical + Processing</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>OLTP</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/OLTP">Online Transaction + Processing</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>ORDBMS</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/ORDBMS">Object-Relational + Database Management System</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>PAM</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Pluggable_Authentication_Modules">Pluggable + Authentication Modules</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>PGSQL</acronym></term> + <listitem> + <para> + <link linkend="postgres"><productname>PostgreSQL</></link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>PGXS</acronym></term> + <listitem> + <para> + <link linkend="xfunc-c-pgxs"><productname>PostgreSQL</> Extension System</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>PID</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Process_identifier">Process Identifier</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>PITR</acronym></term> + <listitem> + <para> + <link linkend="continuous-archiving">Point-In-Time + Recovery</link> (Continuous Archiving) + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>PL</acronym></term> + <listitem> + <para> + <link linkend="server-programming">Programming Languages (server-side)</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>POSIX</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/POSIX">Portable Operating + System Interface</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>RDBMS</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Relational_database_management_system">Relational + Database Management System</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>RFC</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Request_for_Comments">Request For + 
Comments</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SGML</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/SGML">Standard Generalized + Markup Language</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SPI</acronym></term> + <listitem> + <para> + <link linkend="spi">Server Programming Interface</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SQL</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/SQL">Structured Query Language</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SRF</acronym></term> + <listitem> + <para> + <link linkend="xfunc-c-return-set">Set-Returning Function</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SSH</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Secure_Shell">Secure + Shell</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SSL</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/Secure_Sockets_Layer">Secure Sockets Layer</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SSPI</acronym></term> + <listitem> + <para> + <ulink url="http://msdn.microsoft.com/en-us/library/aa380493%28VS.85%29.aspx">Security + Support Provider Interface</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>SYSV</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/System_V">Unix System V</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>TCP/IP</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Transmission_Control_Protocol">Transmission + Control Protocol (TCP) / Internet Protocol (IP)</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>TID</acronym></term> + <listitem> + <para> + <link linkend="datatype-oid">Tuple Identifier</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>TOAST</acronym></term> + <listitem> + <para> + <link linkend="storage-toast">The Oversized-Attribute Storage Technique</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>TPC</acronym></term> + <listitem> + <para> + <ulink url="http://www.tpc.org/">Transaction Processing + Performance Council</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>URL</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/URL">Uniform Resource + Locator</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>UTC</acronym></term> + <listitem> + <para> + <ulink + url="http://en.wikipedia.org/wiki/Coordinated_Universal_Time">Coordinated + Universal Time</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>UTF</acronym></term> + <listitem> + <para> + <ulink url="http://www.unicode.org/">Unicode Transformation + Format</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>UTF8</acronym></term> + <listitem> + <para> + <ulink 
url="http://en.wikipedia.org/wiki/Utf8">Eight-Bit Unicode + Transformation Format</ulink> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>UUID</acronym></term> + <listitem> + <para> + <link linkend="datatype-uuid">Universally Unique Identifier</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>WAL</acronym></term> + <listitem> + <para> + <link linkend="wal">Write-Ahead Log</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>XID</acronym></term> + <listitem> + <para> + <link linkend="datatype-oid">Transaction Identifier</link> + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><acronym>XML</acronym></term> + <listitem> + <para> + <ulink url="http://en.wikipedia.org/wiki/XML">Extensible Markup + Language</ulink> + </para> + </listitem> + </varlistentry> + + </variablelist> + </para> + +</appendix> diff --git a/doc/src/sgml/adminpack.sgml b/doc/src/sgml/adminpack.sgml deleted file mode 100644 index ec035f4..0000000 --- a/doc/src/sgml/adminpack.sgml +++ /dev/null @@ -1,40 +0,0 @@ -<!-- doc/src/sgml/adminpack.sgml - -<sect1 id="adminpack"> - <title>adminpack</title> - - <indexterm zone="adminpack"> - <primary>adminpack</primary> - </indexterm> - - <para> - <filename>adminpack</> provides a number of support functions which - <application>pgAdmin</> and other administration and management tools can - use to provide additional functionality, such as remote management - of server log files. - </para> - - <sect2> - <title>Functions implemented</title> - - <para> - The functions implemented by <filename>adminpack</> can only be run by a - superuser. Here's a list of these functions: - -<programlisting> -int8 pg_catalog.pg_file_write(fname text, data text, append bool) -bool pg_catalog.pg_file_rename(oldname text, newname text, archivename text) -bool pg_catalog.pg_file_rename(oldname text, newname text) -bool pg_catalog.pg_file_unlink(fname text) -setof record pg_catalog.pg_logdir_ls() - -/* Renaming of existing backend functions for pgAdmin compatibility */ -int8 pg_catalog.pg_file_read(fname text, data text, append bool) -bigint pg_catalog.pg_file_length(text) -int4 pg_catalog.pg_logfile_rotate() -</programlisting> - </para> - - </sect2> - -</sect1> diff --git a/doc/src/sgml/adminpack.sgmlin b/doc/src/sgml/adminpack.sgmlin new file mode 100644 index 0000000..b097000 --- /dev/null +++ b/doc/src/sgml/adminpack.sgmlin @@ -0,0 +1,40 @@ +<!-- $PostgreSQL: pgsql/doc/src/sgml/adminpack.sgml,v 1.3 2007/12/06 04:12:09 tgl Exp $ --> + +<sect1 id="adminpack"> + <title>adminpack</title> + + <indexterm zone="adminpack"> + <primary>adminpack</primary> + </indexterm> + + <para> + <filename>adminpack</> provides a number of support functions which + <application>pgAdmin</> and other administration and management tools can + use to provide additional functionality, such as remote management + of server log files. + </para> + + <sect2> + <title>Functions implemented</title> + + <para> + The functions implemented by <filename>adminpack</> can only be run by a + superuser. 
Here's a list of these functions: + +<programlisting> +int8 pg_catalog.pg_file_write(fname text, data text, append bool) +bool pg_catalog.pg_file_rename(oldname text, newname text, archivename text) +bool pg_catalog.pg_file_rename(oldname text, newname text) +bool pg_catalog.pg_file_unlink(fname text) +setof record pg_catalog.pg_logdir_ls() + +/* Renaming of existing backend functions for pgAdmin compatibility */ +int8 pg_catalog.pg_file_read(fname text, data text, append bool) +bigint pg_catalog.pg_file_length(text) +int4 pg_catalog.pg_logfile_rotate() +</programlisting> + </para> + + </sect2> + +</sect1> diff --git a/doc/src/sgml/advanced.sgml b/doc/src/sgml/advanced.sgml deleted file mode 100644 index 969db72..0000000 --- a/doc/src/sgml/advanced.sgml +++ /dev/null @@ -1,717 +0,0 @@ -<!-- doc/src/sgml/advanced.sgml - - <chapter id="tutorial-advanced"> - <title>Advanced Features</title> - - <sect1 id="tutorial-advanced-intro"> - <title>Introduction</title> - - <para> - In the previous chapter we have covered the basics of using - <acronym>SQL</acronym> to store and access your data in - <productname>PostgreSQL</productname>. We will now discuss some - more advanced features of <acronym>SQL</acronym> that simplify - management and prevent loss or corruption of your data. Finally, - we will look at some <productname>PostgreSQL</productname> - extensions. - </para> - - <para> - This chapter will on occasion refer to examples found in <xref - linkend="tutorial-sql"> to change or improve them, so it will be - useful to have read that chapter. Some examples from - this chapter can also be found in - <filename>advanced.sql</filename> in the tutorial directory. This - file also contains some sample data to load, which is not - repeated here. (Refer to <xref linkend="tutorial-sql-intro"> for - how to use the file.) - </para> - </sect1> - - - <sect1 id="tutorial-views"> - <title>Views</title> - - <indexterm zone="tutorial-views"> - <primary>view</primary> - </indexterm> - - <para> - Refer back to the queries in <xref linkend="tutorial-join">. - Suppose the combined listing of weather records and city location - is of particular interest to your application, but you do not want - to type the query each time you need it. You can create a - <firstterm>view</firstterm> over the query, which gives a name to - the query that you can refer to like an ordinary table: - -<programlisting> -CREATE VIEW myview AS - SELECT city, temp_lo, temp_hi, prcp, date, location - FROM weather, cities - WHERE city = name; - -SELECT * FROM myview; -</programlisting> - </para> - - <para> - Making liberal use of views is a key aspect of good SQL database - design. Views allow you to encapsulate the details of the - structure of your tables, which might change as your application - evolves, behind consistent interfaces. - </para> - - <para> - Views can be used in almost any place a real table can be used. - Building views upon other views is not uncommon. - </para> - </sect1> - - - <sect1 id="tutorial-fk"> - <title>Foreign Keys</title> - - <indexterm zone="tutorial-fk"> - <primary>foreign key</primary> - </indexterm> - - <indexterm zone="tutorial-fk"> - <primary>referential integrity</primary> - </indexterm> - - <para> - Recall the <classname>weather</classname> and - <classname>cities</classname> tables from <xref - linkend="tutorial-sql">. 
Consider the following problem: You - want to make sure that no one can insert rows in the - <classname>weather</classname> table that do not have a matching - entry in the <classname>cities</classname> table. This is called - maintaining the <firstterm>referential integrity</firstterm> of - your data. In simplistic database systems this would be - implemented (if at all) by first looking at the - <classname>cities</classname> table to check if a matching record - exists, and then inserting or rejecting the new - <classname>weather</classname> records. This approach has a - number of problems and is very inconvenient, so - <productname>PostgreSQL</productname> can do this for you. - </para> - - <para> - The new declaration of the tables would look like this: - -<programlisting> -CREATE TABLE cities ( - city varchar(80) primary key, - location point -); - -CREATE TABLE weather ( - city varchar(80) references cities(city), - temp_lo int, - temp_hi int, - prcp real, - date date -); -</programlisting> - - Now try inserting an invalid record: - -<programlisting> -INSERT INTO weather VALUES ('Berkeley', 45, 53, 0.0, '1994-11-28'); -</programlisting> - -<screen> -ERROR: insert or update on table "weather" violates foreign key constraint "weather_city_fkey" -DETAIL: Key (city)=(Berkeley) is not present in table "cities". -</screen> - </para> - - <para> - The behavior of foreign keys can be finely tuned to your - application. We will not go beyond this simple example in this - tutorial, but just refer you to <xref linkend="ddl"> - for more information. Making correct use of - foreign keys will definitely improve the quality of your database - applications, so you are strongly encouraged to learn about them. - </para> - </sect1> - - - <sect1 id="tutorial-transactions"> - <title>Transactions</title> - - <indexterm zone="tutorial-transactions"> - <primary>transaction</primary> - </indexterm> - - <para> - <firstterm>Transactions</> are a fundamental concept of all database - systems. The essential point of a transaction is that it bundles - multiple steps into a single, all-or-nothing operation. The intermediate - states between the steps are not visible to other concurrent transactions, - and if some failure occurs that prevents the transaction from completing, - then none of the steps affect the database at all. - </para> - - <para> - For example, consider a bank database that contains balances for various - customer accounts, as well as total deposit balances for branches. - Suppose that we want to record a payment of $100.00 from Alice's account - to Bob's account. Simplifying outrageously, the SQL commands for this - might look like: - -<programlisting> -UPDATE accounts SET balance = balance - 100.00 - WHERE name = 'Alice'; -UPDATE branches SET balance = balance - 100.00 - WHERE name = (SELECT branch_name FROM accounts WHERE name = 'Alice'); -UPDATE accounts SET balance = balance + 100.00 - WHERE name = 'Bob'; -UPDATE branches SET balance = balance + 100.00 - WHERE name = (SELECT branch_name FROM accounts WHERE name = 'Bob'); -</programlisting> - </para> - - <para> - The details of these commands are not important here; the important - point is that there are several separate updates involved to accomplish - this rather simple operation. Our bank's officers will want to be - assured that either all these updates happen, or none of them happen. - It would certainly not do for a system failure to result in Bob - receiving $100.00 that was not debited from Alice. 
Nor would Alice long - remain a happy customer if she was debited without Bob being credited. - We need a guarantee that if something goes wrong partway through the - operation, none of the steps executed so far will take effect. Grouping - the updates into a <firstterm>transaction</> gives us this guarantee. - A transaction is said to be <firstterm>atomic</>: from the point of - view of other transactions, it either happens completely or not at all. - </para> - - <para> - We also want a - guarantee that once a transaction is completed and acknowledged by - the database system, it has indeed been permanently recorded - and won't be lost even if a crash ensues shortly thereafter. - For example, if we are recording a cash withdrawal by Bob, - we do not want any chance that the debit to his account will - disappear in a crash just after he walks out the bank door. - A transactional database guarantees that all the updates made by - a transaction are logged in permanent storage (i.e., on disk) before - the transaction is reported complete. - </para> - - <para> - Another important property of transactional databases is closely - related to the notion of atomic updates: when multiple transactions - are running concurrently, each one should not be able to see the - incomplete changes made by others. For example, if one transaction - is busy totalling all the branch balances, it would not do for it - to include the debit from Alice's branch but not the credit to - Bob's branch, nor vice versa. So transactions must be all-or-nothing - not only in terms of their permanent effect on the database, but - also in terms of their visibility as they happen. The updates made - so far by an open transaction are invisible to other transactions - until the transaction completes, whereupon all the updates become - visible simultaneously. - </para> - - <para> - In <productname>PostgreSQL</>, a transaction is set up by surrounding - the SQL commands of the transaction with - <command>BEGIN</> and <command>COMMIT</> commands. So our banking - transaction would actually look like: - -<programlisting> -BEGIN; -UPDATE accounts SET balance = balance - 100.00 - WHERE name = 'Alice'; --- etc etc -COMMIT; -</programlisting> - </para> - - <para> - If, partway through the transaction, we decide we do not want to - commit (perhaps we just noticed that Alice's balance went negative), - we can issue the command <command>ROLLBACK</> instead of - <command>COMMIT</>, and all our updates so far will be canceled. - </para> - - <para> - <productname>PostgreSQL</> actually treats every SQL statement as being - executed within a transaction. If you do not issue a <command>BEGIN</> - command, - then each individual statement has an implicit <command>BEGIN</> and - (if successful) <command>COMMIT</> wrapped around it. A group of - statements surrounded by <command>BEGIN</> and <command>COMMIT</> - is sometimes called a <firstterm>transaction block</>. - </para> - - <note> - <para> - Some client libraries issue <command>BEGIN</> and <command>COMMIT</> - commands automatically, so that you might get the effect of transaction - blocks without asking. Check the documentation for the interface - you are using. - </para> - </note> - - <para> - It's possible to control the statements in a transaction in a more - granular fashion through the use of <firstterm>savepoints</>. Savepoints - allow you to selectively discard parts of the transaction, while - committing the rest. 
After defining a savepoint with - <command>SAVEPOINT</>, you can if needed roll back to the savepoint - with <command>ROLLBACK TO</>. All the transaction's database changes - between defining the savepoint and rolling back to it are discarded, but - changes earlier than the savepoint are kept. - </para> - - <para> - After rolling back to a savepoint, it continues to be defined, so you can - roll back to it several times. Conversely, if you are sure you won't need - to roll back to a particular savepoint again, it can be released, so the - system can free some resources. Keep in mind that either releasing or - rolling back to a savepoint - will automatically release all savepoints that were defined after it. - </para> - - <para> - All this is happening within the transaction block, so none of it - is visible to other database sessions. When and if you commit the - transaction block, the committed actions become visible as a unit - to other sessions, while the rolled-back actions never become visible - at all. - </para> - - <para> - Remembering the bank database, suppose we debit $100.00 from Alice's - account, and credit Bob's account, only to find later that we should - have credited Wally's account. We could do it using savepoints like - this: - -<programlisting> -BEGIN; -UPDATE accounts SET balance = balance - 100.00 - WHERE name = 'Alice'; -SAVEPOINT my_savepoint; -UPDATE accounts SET balance = balance + 100.00 - WHERE name = 'Bob'; --- oops ... forget that and use Wally's account -ROLLBACK TO my_savepoint; -UPDATE accounts SET balance = balance + 100.00 - WHERE name = 'Wally'; -COMMIT; -</programlisting> - </para> - - <para> - This example is, of course, oversimplified, but there's a lot of control - possible in a transaction block through the use of savepoints. - Moreover, <command>ROLLBACK TO</> is the only way to regain control of a - transaction block that was put in aborted state by the - system due to an error, short of rolling it back completely and starting - again. - </para> - - </sect1> - - - <sect1 id="tutorial-window"> - <title>Window Functions</title> - - <indexterm zone="tutorial-window"> - <primary>window function</primary> - </indexterm> - - <para> - A <firstterm>window function</> performs a calculation across a set of - table rows that are somehow related to the current row. This is comparable - to the type of calculation that can be done with an aggregate function. - But unlike regular aggregate functions, use of a window function does not - cause rows to become grouped into a single output row — the - rows retain their separate identities. Behind the scenes, the window - function is able to access more than just the current row of the query - result. 
- </para> - - <para> - Here is an example that shows how to compare each employee's salary - with the average salary in his or her department: - -<programlisting> -SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM empsalary; -</programlisting> - -<screen> - depname | empno | salary | avg ------------+-------+--------+----------------------- - develop | 11 | 5200 | 5020.0000000000000000 - develop | 7 | 4200 | 5020.0000000000000000 - develop | 9 | 4500 | 5020.0000000000000000 - develop | 8 | 6000 | 5020.0000000000000000 - develop | 10 | 5200 | 5020.0000000000000000 - personnel | 5 | 3500 | 3700.0000000000000000 - personnel | 2 | 3900 | 3700.0000000000000000 - sales | 3 | 4800 | 4866.6666666666666667 - sales | 1 | 5000 | 4866.6666666666666667 - sales | 4 | 4800 | 4866.6666666666666667 -(10 rows) -</screen> - - The first three output columns come directly from the table - <structname>empsalary</>, and there is one output row for each row in the - table. The fourth column represents an average taken across all the table - rows that have the same <structfield>depname</> value as the current row. - (This actually is the same function as the regular <function>avg</> - aggregate function, but the <literal>OVER</> clause causes it to be - treated as a window function and computed across an appropriate set of - rows.) - </para> - - <para> - A window function call always contains an <literal>OVER</> clause - following the window function's name and argument(s). This is what - syntactically distinguishes it from a regular function or aggregate - function. The <literal>OVER</> clause determines exactly how the - rows of the query are split up for processing by the window function. - The <literal>PARTITION BY</> list within <literal>OVER</> specifies - dividing the rows into groups, or partitions, that share the same - values of the <literal>PARTITION BY</> expression(s). For each row, - the window function is computed across the rows that fall into the - same partition as the current row. - </para> - - <para> - Although <function>avg</> will produce the same result no matter - what order it processes the partition's rows in, this is not true of all - window functions. When needed, you can control that order using - <literal>ORDER BY</> within <literal>OVER</>. Here is an example: - -<programlisting> -SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary DESC) FROM empsalary; -</programlisting> - -<screen> - depname | empno | salary | rank ------------+-------+--------+------ - develop | 8 | 6000 | 1 - develop | 10 | 5200 | 2 - develop | 11 | 5200 | 2 - develop | 9 | 4500 | 4 - develop | 7 | 4200 | 5 - personnel | 2 | 3900 | 1 - personnel | 5 | 3500 | 2 - sales | 1 | 5000 | 1 - sales | 4 | 4800 | 2 - sales | 3 | 4800 | 2 -(10 rows) -</screen> - - As shown here, the <function>rank</> function produces a numerical rank - within the current row's partition for each distinct <literal>ORDER BY</> - value, in the order defined by the <literal>ORDER BY</> clause. - <function>rank</> needs no explicit parameter, because its behavior - is entirely determined by the <literal>OVER</> clause. - </para> - - <para> - The rows considered by a window function are those of the <quote>virtual - t... [truncated message content] |
From: Koichi S. <koi...@us...> - 2011-04-07 05:13:52
|
Project "Postgres-XC documentation". The branch, master has been updated via 252519c2be5309a3682b0ee895cf040083ae1784 (commit) from 44c2c20d58aa6b5f271cb0d388ffc59bc32403bb (commit) - Log ----------------------------------------------------------------- commit 252519c2be5309a3682b0ee895cf040083ae1784 Author: Koichi Suzuki <koi...@gm...> Date: Thu Apr 7 14:13:27 2011 +0900 Added new directory for progress reporting document, "progress". Also added new file to report the progress of "Reference Manual". diff --git a/progress/documentation-progress.ods b/progress/documentation-progress.ods new file mode 100644 index 0000000..277aade Binary files /dev/null and b/progress/documentation-progress.ods differ ----------------------------------------------------------------------- Summary of changes: progress/documentation-progress.ods | Bin 0 -> 16883 bytes 1 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 progress/documentation-progress.ods hooks/post-receive -- Postgres-XC documentation |
From: Michael P. <mic...@us...> - 2011-04-07 04:30:38
|
Project "Postgres-XC". The branch, master has been updated via 8b018fd20850ec0753fdfbef024b9a957efaeb0a (commit) from 913fba843d425a786196bcffc965e5ceea75e55d (commit) - Log ----------------------------------------------------------------- commit 8b018fd20850ec0753fdfbef024b9a957efaeb0a Author: Michael P <mic...@us...> Date: Thu Apr 7 13:10:09 2011 +0900 Support for session and local parameters This commit adds support for commands like: SET ROLE ... ; SET param TO value; SET SESSION param TO value; SET LOCAL param TO value; When a SET command is launched, it is saved in pooler and then launched to nodes by pooler if connections to backend nodes exist. Commands are saved with the following format as a string: "SET param1 TO value1;...;SET paramN TO valueN" Local and session commands are saved as separate strings. When a new connection is created to a backend node, pooler replays all the saved SET commands. When a transaction is finished, local parameters are deleted from pooler. It is not necessary in this case to reset on backend nodes as transaction commit has made the work. If a SET command has been launched for a non-local parameter, connections to nodes are kept alive with the session and not sent back to pool when a transaction finishes. When session is finished (user logging off), pooler sends asynchronously a "RESET ALL" command to each connection and put connections back to pool. Reset is not launched if no SET queries for session parameters have been launched. A SET command for local parameters is not sent to pooler if it is not inside a transaction block to save ressources in the cluster. This commit contains also a couple of corrections for regression tests according to implementation of session parameters. diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c index cf38041..848a3cc 100644 --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@ -4398,6 +4398,9 @@ PGXCNodeCleanAndRelease(int code, Datum arg) /* Release data node connections */ release_handles(); + /* Disconnect from Pooler */ + PoolManagerDisconnect(); + /* Close connection with GTM */ CloseGTM(); diff --git a/src/backend/pgxc/pool/pgxcnode.c b/src/backend/pgxc/pool/pgxcnode.c index dcd721d..d7230b0 100644 --- a/src/backend/pgxc/pool/pgxcnode.c +++ b/src/backend/pgxc/pool/pgxcnode.c @@ -196,6 +196,28 @@ PGXCNodeClose(NODE_CONNECTION *conn) PQfinish((PGconn *) conn); } +/* + * Send SET query to given connection. 
+ * Query is sent asynchronously and results are consumed + */ +int +PGXCNodeSendSetQuery(NODE_CONNECTION *conn, const char *sql_command) +{ + PGresult *result; + + if (!PQsendQuery((PGconn *) conn, sql_command)) + return -1; + + /* Consume results from SET commands */ + while ((result = PQgetResult((PGconn *) conn)) != NULL) + { + /* TODO: Check that results are of type 'S' */ + PQclear(result); + } + + return 0; +} + /* * Checks if connection active diff --git a/src/backend/pgxc/pool/poolmgr.c b/src/backend/pgxc/pool/poolmgr.c index 478ba42..ecd18f9 100644 --- a/src/backend/pgxc/pool/poolmgr.c +++ b/src/backend/pgxc/pool/poolmgr.c @@ -93,6 +93,7 @@ static void agent_init(PoolAgent *agent, const char *database, const char *user_ static void agent_destroy(PoolAgent *agent); static void agent_create(void); static void agent_handle_input(PoolAgent *agent, StringInfo s); +static int agent_set_command(PoolAgent *agent, const char *set_command, bool is_local); static DatabasePool *create_database_pool(const char *database, const char *user_name); static void insert_database_pool(DatabasePool *pool); static int destroy_database_pool(const char *database, const char *user_name); @@ -102,6 +103,7 @@ static DatabasePool *remove_database_pool(const char *database, const char *user static int *agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist); static PGXCNodePoolSlot *acquire_connection(DatabasePool *dbPool, int node, char client_conn_type); static void agent_release_connections(PoolAgent *agent, List *dn_discard, List *co_discard); +static void agent_reset_params(PoolAgent *agent, List *dn_list, List *co_list); static void release_connection(DatabasePool *dbPool, PGXCNodePoolSlot *slot, int index, bool clean, char client_conn_type); static void destroy_slot(PGXCNodePoolSlot *slot); @@ -476,6 +478,8 @@ agent_create(void) agent->pool = NULL; agent->dn_connections = NULL; agent->coord_connections = NULL; + agent->session_params = NULL; + agent->local_params = NULL; agent->pid = 0; /* Append new agent to the list */ @@ -528,6 +532,36 @@ PoolManagerConnect(PoolHandle *handle, const char *database, const char *user_na pool_flush(&handle->port); } +int +PoolManagerSetCommand(bool is_local, const char *set_command) +{ + int n32; + char msgtype = 's'; + + Assert(set_command); + Assert(Handle); + + /* Message type */ + pool_putbytes(&Handle->port, &msgtype, 1); + + /* Message length */ + n32 = htonl(strlen(set_command) + 10); + pool_putbytes(&Handle->port, (char *) &n32, 4); + + /* LOCAL or SESSION parameter ? 
*/ + pool_putbytes(&Handle->port, (char *) &is_local, 1); + + /* Length of SET command string */ + n32 = htonl(strlen(set_command) + 1); + pool_putbytes(&Handle->port, (char *) &n32, 4); + + /* Send command string followed by \0 terminator */ + pool_putbytes(&Handle->port, set_command, strlen(set_command) + 1); + pool_flush(&Handle->port); + + /* Get result */ + pool_recvres(&Handle->port); +} /* * Init PoolAgent @@ -567,9 +601,9 @@ agent_destroy(PoolAgent *agent) /* Discard connections if any remaining */ if (agent->pool) { - List *dn_conn = NIL; - List *co_conn = NIL; - int i; + List *dn_conn = NIL; + List *co_conn = NIL; + int i; /* gather abandoned datanode connections */ if (agent->dn_connections) @@ -583,6 +617,12 @@ agent_destroy(PoolAgent *agent) if (agent->coord_connections[i]) co_conn = lappend_int(co_conn, i+1); + /* + * agent is being destroyed, so reset session parameters + * before putting back connections to pool + */ + agent_reset_params(agent, dn_conn, co_conn); + /* release them all */ agent_release_connections(agent, dn_conn, co_conn); } @@ -603,6 +643,16 @@ agent_destroy(PoolAgent *agent) pfree(agent->coord_connections); agent->coord_connections = NULL; } + if (agent->local_params) + { + pfree(agent->local_params); + agent->local_params = NULL; + } + if (agent->session_params) + { + pfree(agent->session_params); + agent->session_params = NULL; + } pfree(agent); /* shrink the list and move last agent into the freed slot */ if (i < --agentCount) @@ -618,16 +668,14 @@ agent_destroy(PoolAgent *agent) * Release handle to pool manager */ void -PoolManagerDisconnect(PoolHandle *handle) +PoolManagerDisconnect(void) { - Assert(handle); + Assert(Handle); - pool_putmessage(&handle->port, 'd', NULL, 0); + pool_putmessage(&Handle->port, 'd', NULL, 0); pool_flush(&Handle->port); - close(Socket(handle->port)); - - pfree(handle); + close(Socket(Handle->port)); } @@ -784,6 +832,8 @@ agent_handle_input(PoolAgent * agent, StringInfo s) { const char *database; const char *user_name; + const char *set_command; + bool is_local; int datanodecount; int coordcount; List *datanodelist = NIL; @@ -905,6 +955,20 @@ agent_handle_input(PoolAgent * agent, StringInfo s) list_free(datanodelist); list_free(coordlist); break; + case 's': /* SET COMMAND */ + pool_getmessage(&agent->port, s, 0); + /* Determine if command is local or session */ + is_local = (bool) pq_getmsgbyte(s); + /* Get the SET command */ + len = pq_getmsgint(s, 4); + set_command = pq_getmsgbytes(s, len); + pq_getmsgend(s); + + res = agent_set_command(agent, set_command, is_local); + + /* Send success result */ + pool_sendres(&agent->port, res); + break; default: /* EOF or protocol violation */ agent_destroy(agent); return; @@ -915,6 +979,77 @@ agent_handle_input(PoolAgent * agent, StringInfo s) } } +/* + * Save a SET command and distribute it to the agent connections + * already in use. + */ +static int +agent_set_command(PoolAgent *agent, const char *set_command, bool is_local) +{ + char *params_string; + int i; + int res = 0; + + Assert(agent); + Assert(set_command); + + if (is_local) + params_string = agent->local_params; + else + params_string = agent->session_params; + + /* First command recorded */ + if (!params_string) + { + params_string = pstrdup(set_command); + if (!params_string) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); + } + else + { + /* + * Second command or more recorded. 
+ * Commands are saved with format 'SET param1 TO value1;...;SET paramN TO valueN' + */ + params_string = (char *) repalloc(params_string, + strlen(params_string) + strlen(set_command) + 2); + if (!params_string) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); + + sprintf(params_string, "%s;%s", params_string, set_command); + } + + /* Launch the new command to all the connections already hold by the agent */ + if (agent->dn_connections) + { + for (i = 0; i < NumDataNodes; i++) + { + if (agent->dn_connections[i]) + res = PGXCNodeSendSetQuery(agent->dn_connections[i]->conn, set_command); + } + } + + if (agent->coord_connections) + { + for (i = 0; i < NumCoords; i++) + { + if (agent->coord_connections[i]) + res |= PGXCNodeSendSetQuery(agent->coord_connections[i]->conn, set_command); + } + } + + /* Save the latest string */ + if (is_local) + agent->local_params = params_string; + else + agent->session_params = params_string; + + return res; +} /* * acquire connection @@ -1005,6 +1140,12 @@ agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist) /* Store in the descriptor */ agent->dn_connections[node - 1] = slot; + + /* Update newly-acquired slot with session parameters */ + if (agent->session_params) + PGXCNodeSendSetQuery(slot->conn, agent->session_params); + if (agent->local_params) + PGXCNodeSendSetQuery(slot->conn, agent->local_params); } result[i++] = PQsocket((PGconn *) agent->dn_connections[node - 1]->conn); @@ -1029,6 +1170,12 @@ agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist) /* Store in the descriptor */ agent->coord_connections[node - 1] = slot; + + /* Update newly-acquired slot with session parameters */ + if (agent->session_params) + PGXCNodeSendSetQuery(slot->conn, agent->session_params); + if (agent->local_params) + PGXCNodeSendSetQuery(slot->conn, agent->local_params); } result[i++] = PQsocket((PGconn *) agent->coord_connections[node - 1]->conn); @@ -1095,6 +1242,20 @@ agent_release_connections(PoolAgent *agent, List *dn_discard, List *co_discard) if (!agent->dn_connections && !agent->coord_connections) return; + /* + * If there are some session parameters, do not put back connections to pool + * disconnection will be made when session is cut for this user. + * Local parameters are reset when transaction block is finished, + * so don't do anything for them, but just reset their list. + */ + if (agent->local_params) + { + pfree(agent->local_params); + agent->local_params = NULL; + } + if (agent->session_params) + return; + /* Discard first for Datanodes */ if (dn_discard) { @@ -1156,6 +1317,65 @@ agent_release_connections(PoolAgent *agent, List *dn_discard, List *co_discard) } } +/* + * Reset session parameters for given connections in the agent. + * This is done before putting back to pool connections that have been + * modified by session parameters. 
+ */ +static void +agent_reset_params(PoolAgent *agent, List *dn_list, List *co_list) +{ + PGXCNodePoolSlot *slot; + + if (!agent->dn_connections && !agent->coord_connections) + return; + + /* Parameters are reset, so free commands */ + if (agent->session_params) + { + pfree(agent->session_params); + agent->session_params = NULL; + } + if (agent->local_params) + { + pfree(agent->local_params); + agent->local_params = NULL; + } + + /* Reset Datanode connection params */ + if (dn_list) + { + ListCell *lc; + + foreach(lc, dn_list) + { + int node = lfirst_int(lc); + Assert(node > 0 && node <= NumDataNodes); + slot = agent->dn_connections[node - 1]; + + /* Reset connection params */ + if (slot) + PGXCNodeSendSetQuery(slot->conn, "RESET ALL;"); + } + } + + /* Reset Coordinator connection params */ + if (co_list) + { + ListCell *lc; + + foreach(lc, co_list) + { + int node = lfirst_int(lc); + Assert(node > 0 && node <= NumCoords); + slot = agent->coord_connections[node - 1]; + + /* Reset connection params */ + if (slot) + PGXCNodeSendSetQuery(slot->conn, "RESET ALL;"); + } + } +} /* * Create new empty pool for a database. diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 1ffeead..8408b40 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -62,6 +62,7 @@ #include "pgxc/pgxc.h" #include "pgxc/planner.h" #include "pgxc/poolutils.h" +#include "pgxc/poolmgr.h" static void ExecUtilityStmtOnNodes(const char *queryString, ExecNodes *nodes, bool force_autocommit, RemoteQueryExecType exec_type); @@ -1414,11 +1415,18 @@ standard_ProcessUtility(Node *parsetree, case T_VariableSetStmt: ExecSetVariableStmt((VariableSetStmt *) parsetree); #ifdef PGXC -/* PGXCTODO - this currently causes an assertion failure. - We should change when we add SET handling properly - if (IS_PGXC_COORDINATOR) - ExecUtilityStmtOnNodes(queryString, NULL, false); -*/ + /* Let the pooler manage the statement */ + if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) + { + VariableSetStmt *stmt = (VariableSetStmt *) parsetree; + /* + * If command is local and we are not in a transaction block do NOT + * send this query to backend nodes + */ + if (!stmt->is_local || !IsTransactionBlock()) + if (PoolManagerSetCommand(stmt->is_local, queryString) < 0) + elog(ERROR, "Postgres-XC: ERROR SET query"); + } #endif break; @@ -1576,6 +1584,14 @@ standard_ProcessUtility(Node *parsetree, case T_ConstraintsSetStmt: AfterTriggerSetState((ConstraintsSetStmt *) parsetree); + + /* + * PGXCTODO: SET CONSTRAINT management + * This can just be done inside a transaction block, + * so just launch it on all the Datanodes. + * For the time being only IMMEDIATE constraints are supported + * so this is not really useful... 
+ */ break; case T_CheckPointStmt: diff --git a/src/include/pgxc/pgxcnode.h b/src/include/pgxc/pgxcnode.h index 007e1dc..83dce0c 100644 --- a/src/include/pgxc/pgxcnode.h +++ b/src/include/pgxc/pgxcnode.h @@ -96,6 +96,7 @@ extern void InitMultinodeExecutor(void); extern char *PGXCNodeConnStr(char *host, char *port, char *dbname, char *user, char *remote_type); extern NODE_CONNECTION *PGXCNodeConnect(char *connstr); +extern int PGXCNodeSendSetQuery(NODE_CONNECTION *conn, const char *sql_command); extern void PGXCNodeClose(NODE_CONNECTION * conn); extern int PGXCNodeConnected(NODE_CONNECTION * conn); extern int PGXCNodeConnClean(NODE_CONNECTION * conn); diff --git a/src/include/pgxc/poolmgr.h b/src/include/pgxc/poolmgr.h index 7e53b48..febf1d4 100644 --- a/src/include/pgxc/poolmgr.h +++ b/src/include/pgxc/poolmgr.h @@ -56,8 +56,10 @@ typedef struct databasepool struct databasepool *next; } DatabasePool; -/* Agent of client session (Pool Manager side) +/* + * Agent of client session (Pool Manager side) * Acts as a session manager, grouping connections together + * and managing session parameters */ typedef struct { @@ -68,6 +70,8 @@ typedef struct DatabasePool *pool; PGXCNodePoolSlot **dn_connections; /* one for each Datanode */ PGXCNodePoolSlot **coord_connections; /* one for each Coordinator */ + char *session_params; + char *local_params; } PoolAgent; /* Handle to the pool manager (Session's side) */ @@ -116,7 +120,7 @@ extern void PoolManagerCloseHandle(PoolHandle *handle); /* * Gracefully close connection to the PoolManager */ -extern void PoolManagerDisconnect(PoolHandle *handle); +extern void PoolManagerDisconnect(void); /* * Called from Session process after fork(). Associate handle with session @@ -125,6 +129,14 @@ extern void PoolManagerDisconnect(PoolHandle *handle); */ extern void PoolManagerConnect(PoolHandle *handle, const char *database, const char *user_name); +/* + * Save a SET command in Pooler. + * This command is run on existent agent connections + * and stored in pooler agent to be replayed when new connections + * are requested. + */ +extern int PoolManagerSetCommand(bool is_local, const char *set_command); + /* Get pooled connections */ extern int *PoolManagerGetConnections(List *datanodelist, List *coordlist); diff --git a/src/test/regress/expected/guc_1.out b/src/test/regress/expected/guc_1.out index d71a66c..83b5b65 100644 --- a/src/test/regress/expected/guc_1.out +++ b/src/test/regress/expected/guc_1.out @@ -513,6 +513,7 @@ SELECT current_user = 'temp_reset_user'; (1 row) DROP ROLE temp_reset_user; +ERROR: permission denied to drop role -- -- Tests for function-local GUC settings -- @@ -520,32 +521,35 @@ set work_mem = '3MB'; create function report_guc(text) returns text as $$ select current_setting($1) $$ language sql set work_mem = '1MB'; +ERROR: stable and volatile not yet supported, function volatility has to be immutable select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 1MB | 3MB -(1 row) - +ERROR: function report_guc(unknown) does not exist +LINE 1: select report_guc('work_mem'), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-- this should draw only a warning alter function report_guc(text) set search_path = no_such_schema; -NOTICE: schema "no_such_schema" does not exist +ERROR: function report_guc(text) does not exist -- with error occurring here select report_guc('work_mem'), current_setting('work_mem'); -ERROR: schema "no_such_schema" does not exist +ERROR: function report_guc(unknown) does not exist +LINE 1: select report_guc('work_mem'), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. alter function report_guc(text) reset search_path set work_mem = '2MB'; +ERROR: function report_guc(text) does not exist select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 2MB | 3MB -(1 row) - +ERROR: function report_guc(unknown) does not exist +LINE 1: select report_guc('work_mem'), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. alter function report_guc(text) reset all; +ERROR: function report_guc(text) does not exist select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 3MB | 3MB -(1 row) - +ERROR: function report_guc(unknown) does not exist +LINE 1: select report_guc('work_mem'), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- SET LOCAL is restricted by a function SET option create or replace function myfunc(int) returns text as $$ begin @@ -554,19 +558,19 @@ begin end $$ language plpgsql set work_mem = '1MB'; +ERROR: stable and volatile not yet supported, function volatility has to be immutable select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 3MB -(1 row) - +ERROR: function myfunc(integer) does not exist +LINE 1: select myfunc(0), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. alter function myfunc(int) reset all; +ERROR: function myfunc(integer) does not exist select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - +ERROR: function myfunc(integer) does not exist +LINE 1: select myfunc(0), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. set work_mem = '3MB'; -- but SET isn't create or replace function myfunc(int) returns text as $$ @@ -576,12 +580,12 @@ begin end $$ language plpgsql set work_mem = '1MB'; +ERROR: stable and volatile not yet supported, function volatility has to be immutable select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - +ERROR: function myfunc(integer) does not exist +LINE 1: select myfunc(0), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
set work_mem = '3MB'; -- it should roll back on error, though create or replace function myfunc(int) returns text as $$ @@ -592,10 +596,12 @@ begin end $$ language plpgsql set work_mem = '1MB'; +ERROR: stable and volatile not yet supported, function volatility has to be immutable select myfunc(0); -ERROR: division by zero -CONTEXT: SQL statement "SELECT 1/$1" -PL/pgSQL function "myfunc" line 3 at PERFORM +ERROR: function myfunc(integer) does not exist +LINE 1: select myfunc(0); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. select current_setting('work_mem'); current_setting ----------------- @@ -603,8 +609,7 @@ select current_setting('work_mem'); (1 row) select myfunc(1), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - +ERROR: function myfunc(integer) does not exist +LINE 1: select myfunc(1), current_setting('work_mem'); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. diff --git a/src/test/regress/expected/plancache_1.out b/src/test/regress/expected/plancache_1.out index 389d0da..683a42e 100644 --- a/src/test/regress/expected/plancache_1.out +++ b/src/test/regress/expected/plancache_1.out @@ -150,7 +150,11 @@ ERROR: Postgres-XC does not support EXECUTE yet DETAIL: The feature is not currently supported set search_path = s2; select f1 from abc; -ERROR: relation "abc" does not exist + f1 +----- + 456 +(1 row) + execute p1; ERROR: Postgres-XC does not support EXECUTE yet DETAIL: The feature is not currently supported diff --git a/src/test/regress/expected/privileges_1.out b/src/test/regress/expected/privileges_1.out index d71fd34..51153a2 100644 --- a/src/test/regress/expected/privileges_1.out +++ b/src/test/regress/expected/privileges_1.out @@ -157,6 +157,7 @@ UPDATE atest2 SET col2 = NULL; -- ok UPDATE atest2 SET col2 = NOT col2; -- fails; requires SELECT on atest2 ERROR: permission denied for relation atest2 UPDATE atest2 SET col2 = true FROM atest1 WHERE atest1.a = 5; -- ok +ERROR: permission denied for relation atest2 SELECT * FROM atest1 FOR UPDATE; -- fail ERROR: permission denied for relation atest1 SELECT * FROM atest2 FOR UPDATE; -- fail @@ -217,26 +218,17 @@ SELECT * FROM atestv1; -- ok SELECT * FROM atestv2; -- fail ERROR: permission denied for relation atestv2 SELECT * FROM atestv3; -- ok - one | two | three ------+-----+------- -(0 rows) - +ERROR: permission denied for relation atest3 CREATE VIEW atestv4 AS SELECT * FROM atestv3; -- nested view SELECT * FROM atestv4; -- ok - one | two | three ------+-----+------- -(0 rows) - +ERROR: permission denied for relation atest3 GRANT SELECT ON atestv4 TO regressuser2; SET SESSION AUTHORIZATION regressuser2; -- Two complex cases: SELECT * FROM atestv3; -- fail ERROR: permission denied for relation atestv3 SELECT * FROM atestv4; -- ok (even though regressuser2 cannot access underlying atestv3) - one | two | three ------+-----+------- -(0 rows) - +ERROR: permission denied for relation atest3 SELECT * FROM atest2; -- ok col1 | col2 ------+------ @@ -294,17 +286,9 @@ ERROR: permission denied for relation atest5 SELECT * FROM atest1, atest5; -- fail ERROR: permission denied for relation atest5 SELECT atest1.* FROM atest1, atest5; -- ok - a | b ----+----- - 2 | two -(1 row) - +ERROR: permission denied for relation atest5 SELECT atest1.*,atest5.one FROM atest1, atest5; -- ok - a | b | one ----+-----+----- - 2 | two | 1 -(1 row) - +ERROR: permission denied for 
relation atest5 SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.two); -- fail ERROR: permission denied for relation atest5 SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok @@ -817,7 +801,6 @@ SELECT has_table_privilege('regressuser3', 'atest4', 'SELECT'); -- true REVOKE SELECT ON atest4 FROM regressuser2; -- fail ERROR: dependent privileges exist -HINT: Use CASCADE to revoke them too. REVOKE GRANT OPTION FOR SELECT ON atest4 FROM regressuser2 CASCADE; -- ok SELECT has_table_privilege('regressuser2', 'atest4', 'SELECT'); -- true has_table_privilege @@ -1235,8 +1218,6 @@ REVOKE USAGE ON LANGUAGE sql FROM regressuser1; DROP OWNED BY regressuser1; DROP USER regressuser1; DROP USER regressuser2; -ERROR: role "regressuser2" cannot be dropped because some objects depend on it -DETAIL: privileges for language sql DROP USER regressuser3; DROP USER regressuser4; DROP USER regressuser5; ----------------------------------------------------------------------- Summary of changes: src/backend/pgxc/pool/execRemote.c | 3 + src/backend/pgxc/pool/pgxcnode.c | 22 +++ src/backend/pgxc/pool/poolmgr.c | 238 ++++++++++++++++++++++++++- src/backend/tcop/utility.c | 26 +++- src/include/pgxc/pgxcnode.h | 1 + src/include/pgxc/poolmgr.h | 16 ++- src/test/regress/expected/guc_1.out | 85 ++++++----- src/test/regress/expected/plancache_1.out | 6 +- src/test/regress/expected/privileges_1.out | 31 +--- 9 files changed, 346 insertions(+), 82 deletions(-) hooks/post-receive -- Postgres-XC |
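For reference, here is a minimal sketch of the wire format that PoolManagerSetCommand in the patch above produces for the 's' (SET command) pooler message: one message-type byte, a 4-byte length that counts itself plus the payload, one is_local flag byte, a 4-byte command-string length, and the NUL-terminated command text. The frame_set_command helper and the flat buffer are illustrative assumptions only; the actual patch streams these bytes through pool_putbytes() and pool_flush().

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/*
 * Illustrative sketch only: lay out the 's' pooler message in a flat
 * buffer, mirroring what PoolManagerSetCommand sends byte by byte.
 * Returns the number of bytes written.
 */
static size_t
frame_set_command(char *buf, char is_local, const char *set_command)
{
    uint32_t cmd_len = (uint32_t) strlen(set_command) + 1; /* keep the '\0' */
    uint32_t n32;
    char    *p = buf;

    *p++ = 's';                       /* message type */
    n32 = htonl(cmd_len + 9);         /* 4 (this field) + 1 (flag) + 4 (len) + string */
    memcpy(p, &n32, 4);
    p += 4;
    *p++ = is_local;                  /* LOCAL or SESSION parameter? */
    n32 = htonl(cmd_len);             /* length of the SET command string */
    memcpy(p, &n32, 4);
    p += 4;
    memcpy(p, set_command, cmd_len);  /* "SET param1 TO value1;...;SET paramN TO valueN" */
    p += cmd_len;

    return (size_t) (p - buf);        /* 1 type byte + (strlen + 10) bytes */
}

On the receiving side, agent_handle_input() reads the flag and the string back in the same order under its 's' case and replies with a single integer status, which the backend collects via pool_recvres().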
From: Michael P. <mic...@us...> - 2011-04-07 03:36:54
|
Project "Postgres-XC". The branch, master has been updated via 913fba843d425a786196bcffc965e5ceea75e55d (commit) from 112ad257947a5cc60b7c598880d335a5b9a351c1 (commit) - Log ----------------------------------------------------------------- commit 913fba843d425a786196bcffc965e5ceea75e55d Author: Michael P <mic...@us...> Date: Thu Apr 7 12:31:07 2011 +0900 Block FULL JOIN expressions This was leading to issues in join tests in XC planner like bug 3277038. It was not noticed before because this test has problems with relation "t1", which is created in test create_view but not removed correctly. diff --git a/src/backend/pgxc/plan/planner.c b/src/backend/pgxc/plan/planner.c index 45c453a..019b6b8 100644 --- a/src/backend/pgxc/plan/planner.c +++ b/src/backend/pgxc/plan/planner.c @@ -1355,6 +1355,12 @@ examine_conditions_fromlist(Node *treenode, XCWalkerContext *context) { JoinExpr *joinexpr = (JoinExpr *) treenode; + /* Block FULL JOIN expressions until it is supported */ + if (joinexpr->jointype == JOIN_FULL) + ereport(ERROR, + (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), + (errmsg("FULL JOIN clause not yet supported")))); + /* recursively examine FROM join tree */ if (examine_conditions_fromlist(joinexpr->larg, context)) return true; ----------------------------------------------------------------------- Summary of changes: src/backend/pgxc/plan/planner.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) hooks/post-receive -- Postgres-XC |
From: Koichi S. <koi...@us...> - 2011-04-06 04:55:48
|
Project "Postgres-XC". The branch, documentation has been created at 180361060615c52e211244be7d5939c4a5edc0c4 (commit) - Log ----------------------------------------------------------------- commit 180361060615c52e211244be7d5939c4a5edc0c4 Author: Koichi Suzuki <koi...@gm...> Date: Wed Apr 6 13:49:40 2011 +0900 This is the first commit of "documentation" branch. This branch is to construct reference manual, man page and html document for Postgres-XC. Deatailed information of document preparaton will be given in a file in "doc" directory. At this time, I added "makesgml" tool. To make it easy to merge with PostgreSQL documentation and to translate to other language, makesgml can be used. This tool accepts two kind of tags, <!## label> ... <!## end> in the input file (namely, *.sgmlin) and selects only an area specified by -I option of "makesgml" command. This pair of label can be nested and you can select any part of the source file to be used in the target SGML file. diff --git a/doc/tools/makesgml/makesgml.c b/doc/tools/makesgml/makesgml.c new file mode 100644 index 0000000..013942c --- /dev/null +++ b/doc/tools/makesgml/makesgml.c @@ -0,0 +1,331 @@ +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include <unistd.h> +#include <errno.h> + + +typedef struct tokenlist +{ + struct tokenlist *next; + char *token; +} tokenlist; + + +#define STARTTOKEN "<!##" + +tokenlist *ignoreToks = NULL; +tokenlist *lastIgnoreToken = NULL; +tokenlist *includeToks = NULL; +tokenlist *lastIncludeToken = NULL; + +FILE *inf; +FILE *outf; +int inf_lno; +char *progname; +int default_include = 0; + +void make_sgml(int writeflag); +void usage(int exitcode); +void format_err(int lno); +int my_getline(char *buf); + +main(int argc, char *argv[]) +{ + int flags,opt; + char *ifnam = NULL; + char *ofnam = NULL; + + char *token; + + inf = stdin; + outf = stdout; + + progname = argv[0]; + while((opt = getopt(argc, argv, "i:o:E:I:d:")) != -1) + { + switch(opt) + { + case 'i': + if (ifnam) { + free(ifnam); + ifnam = NULL; + } + if ((strcmp(optarg, "-") == 0) || (strcmp(optarg, "stdin") == 0)) + { + inf = stdin; + } + else + { + ifnam = strndup(optarg, strlen(optarg)); + } + break; + case 'o': + if (ofnam) + { + free(ofnam); + ofnam = NULL; + } + if ((strcmp(optarg, "-") == 0) || (strcmp(optarg, "stdout") == 0)) + { + outf = stdout; + } + else + { + ofnam = strndup(optarg, strlen(optarg)); + } + break; + case 'E': + token = strndup(optarg,strlen(optarg)); + if (ignoreToks == NULL) + { + ignoreToks = (tokenlist *)malloc(sizeof(tokenlist)); + if (ignoreToks == NULL) goto memerr; + ignoreToks->token = token; + ignoreToks->next = NULL; + lastIgnoreToken = ignoreToks; + } + else + { + lastIgnoreToken->next = (tokenlist *)malloc(sizeof(tokenlist)); + if (lastIgnoreToken->next == NULL) goto memerr; + lastIgnoreToken = lastIgnoreToken->next; + lastIgnoreToken->next = NULL; + lastIgnoreToken->token = token; + } + break; + case 'I': + token = strndup(optarg, strlen(optarg)); + if (includeToks == NULL) + { + includeToks = (tokenlist *)malloc(sizeof(tokenlist)); + if (includeToks == NULL) goto memerr; + includeToks->token = token; + includeToks->next = NULL; + lastIncludeToken = includeToks; + } + else + { + lastIncludeToken->next = (tokenlist *)malloc(sizeof(tokenlist)); + if (lastIncludeToken->next == NULL) goto memerr; + lastIncludeToken = lastIncludeToken->next; + lastIncludeToken->next = NULL; + lastIncludeToken->token = token; + } + break; + case 'd': /* Default handling: include/exclude */ + if (strcmp(optarg, "i") == 
0) + { + default_include = 1; + } + else if (strcmp(optarg, "e") == 0) + { + default_include = 0; + } + else + { + usage(1); + } + break; + default: + usage(1); + exit(1); + } + } + if (ifnam) + { + inf = fopen(ifnam, "r"); + if (inf == NULL) + { + fprintf(stderr, "Cannot open input file %s, %s\n", ifnam, strerror(errno)); + exit(1); + } + } + inf_lno = 0; + if (ofnam) + { + outf = fopen(ofnam, "w"); + if (outf == NULL) + { + fprintf(stderr, "Cannot open output file %s, %s\n", ofnam, strerror(errno)); + exit(1); + } + } + make_sgml(1); + exit(0); + + memerr: + fprintf(stderr, "Memory not available.\n"); + exit(1); +} + +int my_getline(char *buf) +{ + int c; + + c = getc(inf); + if (c == EOF) + { + *buf = 0; + return(EOF); + } + else + { + ungetc(c, inf); + } + for(;;) { + c = getc(inf); + switch(c) + { + case '\n': + *buf++ = c; + *buf = 0; + inf_lno++; + return(1); + case EOF: + *buf = 0; + inf_lno++; + return(1); + default: + *buf++ = c; + continue; + } + } + exit(1); +} + + +int find_match(char *token, tokenlist *toks) +{ + tokenlist *currToks; + + for (currToks = toks; currToks; currToks = currToks->next) + { + if (strcmp(token, currToks->token) == 0) + return(1); + } + return(0); +} + +int find_match_exclude(char *token) +{ + return(find_match(token, ignoreToks)); +} + +int find_match_include(char *token) +{ + return(find_match(token, includeToks)); +} + +void format_err(int lno) +{ + fprintf(stderr, "Input file format error. Line %d.\n", lno); + exit(1); +} + + +void make_sgml(int writeflag) +{ + int rv; + char inputline[4096]; + + for(;;) { + char *curr; + char *token; + + rv = my_getline(inputline); + if (rv == EOF) + return; + curr = inputline; + for (;;curr++) { + if (*curr == ' ' || *curr == '\t') + continue; + else + break; + } + if (memcmp(curr, STARTTOKEN, strlen(STARTTOKEN)) == 0) + { + curr += strlen(STARTTOKEN); + if (*curr != ' ' && *curr != '\t') { + format_err(inf_lno); + } + for (curr++;;curr++) { + if (*curr == '\n' || *curr == 0) { + format_err(inf_lno); + } + if (*curr == ' ' || *curr == '\t') { + continue; + } + else { + break; + } + } + token = curr; + for (;;curr++) { + if (*curr == '\n' || *curr == 0) { + format_err(inf_lno); + } + if (*curr == ' ' || *curr == '\t') { + *curr = 0; + curr++; + break; + } + else if (*curr == '>') { + *curr = 0; + curr++; + *curr = '>'; + break; + } + else { + continue; + } + } + for (;;curr++) { + if (*curr == '\n' || *curr == 0) { + format_err(inf_lno); + } + if (*curr == ' ' || *curr == '\t') { + continue; + } + else if (*curr == '>') { + break; + } + else { + format_err(inf_lno); + } + } + /* You can write anything after closing '>' */ + fputc('\n', outf); + if (strcmp(token, "end") == 0) + return; + if (find_match_exclude(token)) { + make_sgml(0); + } + else if (find_match_include(token)) { + if (writeflag) + make_sgml(1); + else + make_sgml(0); + } + else { + make_sgml(0); + } + } + else + { + if (writeflag) + fputs(inputline, outf); + else + fputc('\n', outf); + } + } + exit(1); +} + +void usage(int exitcode) +{ + fprintf(stderr, + "%s -i infile -o outfile [-d i|e ] -E exclude_token -E ... 
-I include_token -I ...\n", + progname); + exit(exitcode); +} diff --git a/doc/tools/makesgml/test.multilang b/doc/tools/makesgml/test.multilang new file mode 100644 index 0000000..b6ce67c --- /dev/null +++ b/doc/tools/makesgml/test.multilang @@ -0,0 +1,22 @@ +Common to PostgreSQL and Postgres-XC, English or Japanese + +<!## PGXC > +PGXC +<!## en > +this +<!## end > +<!## jp > +これ +<!## end > +<!## end > + +<!## PostgreSQL > +PostgreSQL +<!## en > +that +<!## end > +<!## jp > +あれ +<!## end > +<!## end > +ddd diff --git a/doc/tools/makesgml/test.out b/doc/tools/makesgml/test.out new file mode 100644 index 0000000..994ce81 --- /dev/null +++ b/doc/tools/makesgml/test.out @@ -0,0 +1,13 @@ +abcdefg + + +this + +more + + + + + + +ddd diff --git a/doc/tools/makesgml/test.test b/doc/tools/makesgml/test.test new file mode 100644 index 0000000..90f8bc7 --- /dev/null +++ b/doc/tools/makesgml/test.test @@ -0,0 +1,13 @@ +abcdefg + +<!## this> +this +<!## more> +more +<!## end> +<!## end> +<!## that> +that +<!## end> + +ddd ----------------------------------------------------------------------- hooks/post-receive -- Postgres-XC |
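For reference, the test files above show how makesgml selects sections. test.out can presumably be reproduced from test.test with an invocation along the following lines; the exact flags are not recorded in the commit, so the -I selections here are inferred from which sections survive in test.out:

    makesgml -i test.test -o test.out -I this -I more

Sections tagged "this" and "more" are copied through, the "that" section is dropped, and every tag line or excluded line is emitted as an empty line (the fputc('\n', outf) calls in make_sgml), so line numbers in the generated SGML still match the .sgmlin source.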
From: Michael P. <mic...@us...> - 2011-04-06 00:57:25
|
Project "Postgres-XC". The branch, master has been updated via 112ad257947a5cc60b7c598880d335a5b9a351c1 (commit) from e1946160fe64042e76b5252c66b6f6fb5da6b85d (commit) - Log ----------------------------------------------------------------- commit 112ad257947a5cc60b7c598880d335a5b9a351c1 Author: Michael P <mic...@us...> Date: Wed Apr 6 09:34:39 2011 +0900 Fix a memory leak in GTM: free connection data A memory free for string pgxc_node_id was not done, making it a possible memory leak. Patch written by Terasaka Mitsunobu diff --git a/src/gtm/client/fe-connect.c b/src/gtm/client/fe-connect.c index 9e2b564..52ce93c 100644 --- a/src/gtm/client/fe-connect.c +++ b/src/gtm/client/fe-connect.c @@ -869,6 +869,8 @@ freeGTM_Conn(GTM_Conn *conn) free(conn->pgport); if (conn->connect_timeout) free(conn->connect_timeout); + if (conn->pgxc_node_id) + free(conn->pgxc_node_id); if (conn->inBuffer) free(conn->inBuffer); if (conn->outBuffer) ----------------------------------------------------------------------- Summary of changes: src/gtm/client/fe-connect.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) hooks/post-receive -- Postgres-XC |
From: Koichi S. <koi...@us...> - 2011-04-05 10:02:09
|
Project "Postgres-XC". The branch, ha_support has been updated via 0794d85a15b5124f666190926ae036dec9f39855 (commit) from c811d7f4a755154cdf9f4b5e0353aad56fa16331 (commit) - Log ----------------------------------------------------------------- commit 0794d85a15b5124f666190926ae036dec9f39855 Author: Koichi Suzuki <koi...@gm...> Date: Tue Apr 5 19:00:05 2011 +0900 This commit is to tweak the message from xcm_telhowto. When all the coordinators fails, it tells to stop the failed coordinator and then shutdown whole cluster. If all the mirrors of a datanode fails, it does not tell to stop the last failed mirror and just tells to shutdown the whole cluster. This commit tweaks this bahavior and tells to stop the last failed mirror before shutdown. diff --git a/src/pgxc/xcm/xcm_telhowto.c b/src/pgxc/xcm/xcm_telhowto.c index c53d257..3a3afa8 100644 --- a/src/pgxc/xcm/xcm_telhowto.c +++ b/src/pgxc/xcm/xcm_telhowto.c @@ -605,7 +605,11 @@ static void handle_mirror_failure(int ac, char *av[]) if (surviving_mirror <= 0) { /* * No mirror available! + * Anyway we should try to stop the mirro gracefully before shutdown. + * Please note it may not make sense to try to change primary. + * There's no datanode mirror surviving to take over primary. */ + printf("stop mirror %d %d\n", datanode_id, mirror_id); printf("shutdown\n"); fflush(stdout); return; ----------------------------------------------------------------------- Summary of changes: src/pgxc/xcm/xcm_telhowto.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) hooks/post-receive -- Postgres-XC |