From: Koichi S. <koi...@us...> - 2011-05-30 04:39:16
Project "Postgres-XC".

The branch, ha_support has been updated
       via  8caece68c1e81739c02390c7ddb1846be81a849c (commit)
      from  a34aef207659537b5ee51038cfe30fe8d34a4b8a (commit)

- Log -----------------------------------------------------------------
commit 8caece68c1e81739c02390c7ddb1846be81a849c
Author: Koichi Suzuki <koi...@gm...>
Date:   Mon May 30 13:26:53 2011 +0900

    This commit changes GTM-Standby registration from PGXC_NODE_GTM to
    PGXC_NODE_GTM_STANDBY. This is needed to add synchronized backup of
    GTM-Standby. The present code just backs up the current GTM status
    and does not trace further changes.

    Changed files are:
        modified: gtm/main/gtm_standby.c
        modified: gtm/recovery/register.c
        modified: include/gtm/gtm_c.h

    Further commits will include the following additions:
    1. Initiate GTM-Act to be ready to back up its further updates to
       GTM-Standby.
    2. Instruct all the worker threads and the main thread to connect to
       GTM-Standby and back up their updates to it. Please note that
       GTM-Standby cannot pull all the updates synchronously.
    3. GTM worker threads and the main thread back up their updates to
       GTM-Standby.
    4. Each response to GTM clients (GTM-Proxy, in group response, at
       first) will be given a response ID (which may wrap around; 8 bits
       will be sufficient).
    5. When GTM-Proxy reconnects to the promoted GTM, it gives the latest
       and the second-latest response IDs. The promoted GTM will determine
       whether the last response is valid or not.

    Details of the above may change in practice.

diff --git a/src/gtm/main/gtm_standby.c b/src/gtm/main/gtm_standby.c
index 4e9393b..d710a09 100644
--- a/src/gtm/main/gtm_standby.c
+++ b/src/gtm/main/gtm_standby.c
@@ -53,7 +53,7 @@ gtm_standby_start_startup()
 	elog(LOG, "Connecting the GTM active on %s:%d...", GTM_ActiveAddress, GTM_ActivePort);
 
 	sprintf(connect_string, "host=%s port=%d pgxc_node_id=1 remote_type=%d",
-			GTM_ActiveAddress, GTM_ActivePort, PGXC_NODE_GTM);
+			GTM_ActiveAddress, GTM_ActivePort, PGXC_NODE_GTM_STANDBY);
 
 	GTM_ActiveConn = PQconnectGTM(connect_string);
 	if (GTM_ActiveConn == NULL)
@@ -298,7 +298,7 @@ gtm_standby_register_self(GTM_PGXCNodeId nodenum, int port, const char *datadir)
 	standbyPortNumber = port;
 	standbyDataDir = (char *)datadir;
 
-	rc = node_register2(GTM_ActiveConn, PGXC_NODE_GTM, standbyNodeName, standbyPortNumber,
+	rc = node_register2(GTM_ActiveConn, PGXC_NODE_GTM_STANDBY, standbyNodeName, standbyPortNumber,
 						standbyNodeNum, standbyDataDir, NODE_DISCONNECTED);
 	if ( rc<0 )
 	{
@@ -323,14 +323,14 @@ gtm_standby_activate_self()
 
 	elog(LOG, "Updating the standby-GTM status to \"CONNECTED\"...");
 
-	rc = node_unregister(GTM_ActiveConn, PGXC_NODE_GTM, standbyNodeNum);
+	rc = node_unregister(GTM_ActiveConn, PGXC_NODE_GTM_STANDBY, standbyNodeNum);
 	if ( rc<0 )
 	{
 		elog(LOG, "Failed to unregister old standby-GTM status.");
 		return 0;
 	}
 
-	rc = node_register2(GTM_ActiveConn, PGXC_NODE_GTM, standbyNodeName, standbyPortNumber,
+	rc = node_register2(GTM_ActiveConn, PGXC_NODE_GTM_STANDBY, standbyNodeName, standbyPortNumber,
 						standbyNodeNum, standbyDataDir, NODE_CONNECTED);
 	if ( rc<0 )
 	{
diff --git a/src/gtm/recovery/register.c b/src/gtm/recovery/register.c
index 1ee30ce..24fb7cf 100644
--- a/src/gtm/recovery/register.c
+++ b/src/gtm/recovery/register.c
@@ -497,11 +497,15 @@ ProcessPGXCNodeRegister(Port *myport, StringInfo message)
 	status = pq_getmsgint(message, sizeof (GTM_PGXCNodeStatus));
 
+#if 1
+	elog(LOG, "PGXC_NODE_GTM_STANDBY = %d, Nodetype = %d", PGXC_NODE_GTM_STANDBY, type);
+#endif
 	if ( (type!=PGXC_NODE_GTM_PROXY) &&
 		 (type!=PGXC_NODE_GTM_PROXY_POSTMASTER) &&
 		 (type!=PGXC_NODE_COORDINATOR) &&
 		 (type!=PGXC_NODE_DATANODE) &&
 		 (type!=PGXC_NODE_GTM) &&
+		 (type!=PGXC_NODE_GTM_STANDBY) &&
 		 (type!=PGXC_NODE_DEFAULT) )
 		ereport(ERROR,
 				(EINVAL,
diff --git a/src/include/gtm/gtm_c.h b/src/include/gtm/gtm_c.h
index 3d7c179..f49c5dd 100644
--- a/src/include/gtm/gtm_c.h
+++ b/src/include/gtm/gtm_c.h
@@ -54,6 +54,7 @@ typedef enum GTM_PGXCNodeType
 	PGXC_NODE_COORDINATOR,
 	PGXC_NODE_DATANODE,
 	PGXC_NODE_GTM,
+	PGXC_NODE_GTM_STANDBY,
 	PGXC_NODE_DEFAULT	/* In case nothing is associated to connection */
 } GTM_PGXCNodeType;
-----------------------------------------------------------------------

Summary of changes:
 src/gtm/main/gtm_standby.c  |    8 ++++----
 src/gtm/recovery/register.c |    4 ++++
 src/include/gtm/gtm_c.h     |    1 +
 3 files changed, 9 insertions(+), 4 deletions(-)

hooks/post-receive
--
Postgres-XC
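Items 4 and 5 of the plan above amount to a small wrap-around sequence protocol. A minimal sketch of that idea in C follows; all names here are hypothetical, and nothing below exists in the Postgres-XC tree:

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical model of the 8-bit response IDs described in items 4-5:
     * each response to a GTM client carries an ID that wraps around at 256. */
    typedef uint8_t GTM_ResponseId;

    static GTM_ResponseId
    next_response_id(GTM_ResponseId cur)
    {
        return (GTM_ResponseId) (cur + 1);   /* uint8_t wraps 255 -> 0 */
    }

    /* On reconnect to the promoted GTM, the proxy presents its latest and
     * second-latest IDs; the last response is taken as valid only if the
     * two are consecutive modulo 256. */
    static bool
    last_response_valid(GTM_ResponseId latest, GTM_ResponseId second_latest)
    {
        return latest == next_response_id(second_latest);
    }

With 8 bits, up to 255 outstanding responses can be distinguished before the window becomes ambiguous, which is presumably why the commit message considers 8 bits sufficient per proxy connection.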
From: Michael P. <mic...@us...> - 2011-05-30 02:38:48
Project "Postgres-XC".

The branch, PGXC-TrialMaster has been updated
       via  2c8118bade2df8636928aac73b73a2fc1e6c25cf (commit)
       via  49025616ae6204a041412f5fe2204b41a5113fbf (commit)
      from  c07ee5cf650c03a298650ddaf5fa9f0aa9f91876 (commit)

- Log -----------------------------------------------------------------
commit 2c8118bade2df8636928aac73b73a2fc1e6c25cf
Author: Michael P <mic...@us...>
Date:   Fri May 27 17:14:24 2011 +0900

    Fix for bug 3307846: multiple INSERT with JDBC driver

    This fixes issues seen when JDBC was used with a multi-row INSERT
    such as:
    INSERT INTO table_name VALUES (1),(2);

diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 16998f9..9dc1617 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -667,11 +667,11 @@ pg_analyze_and_rewrite(Node *parsetree, const char *query_string,
 	if (IS_PGXC_COORDINATOR && !IsConnFromCoord())
 	{
 		ListCell   *lc;
-
+
 		foreach(lc, querytree_list)
 		{
 			Query *query = (Query *) lfirst(lc);
-
+
 			if (query->sql_statement == NULL)
 				query->sql_statement = pstrdup(query_string);
 		}
@@ -1380,7 +1380,9 @@ exec_parse_message(const char *query_string,	/* string to execute */
 		foreach(lc, querytree_list)
 		{
 			Query *query = (Query *) lfirst(lc);
-			query->sql_statement = pstrdup(query_string);
+
+			if (query->sql_statement == NULL)
+				query->sql_statement = pstrdup(query_string);
 		}
 	}
 #endif

commit 49025616ae6204a041412f5fe2204b41a5113fbf
Author: Michael P <mic...@us...>
Date:   Thu May 26 15:40:07 2011 +0900

    Fix for bug 3299211: JDBC and DML queries

    Running DML queries through JDBC to an XC instance was crashing
    the server.

    Report and patch from sch19831106, with some code refactoring by me.

diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index a54ac74..662baa7 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -475,6 +475,10 @@ InitQueryHashTable(void)
 static int
 set_remote_stmtname(Plan *plan, const char *stmt_name, int n)
 {
+	/* If no plan, simply return */
+	if (!plan)
+		return 0;
+
 	if (IsA(plan, RemoteQuery))
 	{
 		DatanodeStatement *entry;
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index cbe9cc5..d8cd830 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -676,12 +676,17 @@ ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
 	if (plan->fully_planned)
 	{
 		ListCell   *lc;
-		/* close any active datanode statements */
+
+		/* Close any active planned datanode statements */
 		foreach (lc, plan->stmt_list)
 		{
-			PlannedStmt *ps = (PlannedStmt *)lfirst(lc);
+			Node *node = lfirst(lc);
 
-			release_datanode_statements(ps->planTree);
+			if (IsA(node, PlannedStmt))
+			{
+				PlannedStmt *ps = (PlannedStmt *)node;
+				release_datanode_statements(ps->planTree);
+			}
 		}
 	}
 #endif
-----------------------------------------------------------------------

Summary of changes:
 src/backend/commands/prepare.c      |    4 ++++
 src/backend/tcop/postgres.c         |    8 +++++---
 src/backend/utils/cache/plancache.c |   11 ++++++++---
 3 files changed, 17 insertions(+), 6 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-05-26 06:44:33
Project "Postgres-XC".

The branch, master has been updated
       via  3bdbebb9ab2ea6fb9f3fbff5470df82f96b5f90c (commit)
      from  4a7fcb29f0ac168c99f78bdf86078be24015ddb0 (commit)

- Log -----------------------------------------------------------------
commit 3bdbebb9ab2ea6fb9f3fbff5470df82f96b5f90c
Author: Michael P <mic...@us...>
Date:   Thu May 26 15:40:07 2011 +0900

    Fix for bug 3299211: JDBC and DML queries

    Running DML queries through JDBC to an XC instance was crashing
    the server.

    Report and patch from sch19831106, with some code refactoring by me.

diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index a334a34..7d6e90e 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -476,6 +476,10 @@ InitQueryHashTable(void)
 static int
 set_remote_stmtname(Plan *plan, const char *stmt_name, int n)
 {
+	/* If no plan, simply return */
+	if (!plan)
+		return 0;
+
 	if (IsA(plan, RemoteQuery))
 	{
 		DatanodeStatement *entry;
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 3cb09dc..2728c32 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -678,12 +678,17 @@ ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
 	if (plan->fully_planned)
 	{
 		ListCell   *lc;
-		/* close any active datanode statements */
+
+		/* Close any active planned datanode statements */
 		foreach (lc, plan->stmt_list)
 		{
-			PlannedStmt *ps = (PlannedStmt *)lfirst(lc);
+			Node *node = lfirst(lc);
 
-			release_datanode_statements(ps->planTree);
+			if (IsA(node, PlannedStmt))
+			{
+				PlannedStmt *ps = (PlannedStmt *)node;
+				release_datanode_statements(ps->planTree);
+			}
 		}
 	}
 #endif
-----------------------------------------------------------------------

Summary of changes:
 src/backend/commands/prepare.c      |    4 ++++
 src/backend/utils/cache/plancache.c |   11 ++++++++---
 2 files changed, 12 insertions(+), 3 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-05-26 01:30:56
Project "Postgres-XC".

The branch, PGXC-TrialMaster has been updated
       via  c07ee5cf650c03a298650ddaf5fa9f0aa9f91876 (commit)
      from  ac040624b275cca2c3b9c62c88be2bc88887f885 (commit)

- Log -----------------------------------------------------------------
commit c07ee5cf650c03a298650ddaf5fa9f0aa9f91876
Author: Michael P <mic...@us...>
Date:   Thu May 26 10:27:52 2011 +0900

    Fix for regression test float8

    The test was failing due to an incorrectly written query.

diff --git a/src/test/regress/expected/float8_1.out b/src/test/regress/expected/float8_1.out
index 8ce7930..3708d21 100644
--- a/src/test/regress/expected/float8_1.out
+++ b/src/test/regress/expected/float8_1.out
@@ -382,7 +382,8 @@ UPDATE FLOAT8_TBL
    SET f1 = FLOAT8_TBL.f1 * '-1'
    WHERE FLOAT8_TBL.f1 > '0.0';
 ERROR:  Partition column can't be updated in current version
-SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1;
+
+SELECT '' AS bad, f.f1 * '1e200' from FLOAT8_TBL f ORDER BY f1;
 ERROR:  value out of range: overflow
 SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1;
 ERROR:  value out of range: overflow
-----------------------------------------------------------------------

Summary of changes:
 src/test/regress/expected/float8_1.out |    3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-05-25 23:52:34
Project "Postgres-XC".

The branch, PGXC-TrialMaster has been deleted
       was  47e7f5141b5af7e17a3838d10a1758c945d1fd67

-----------------------------------------------------------------------
47e7f5141b5af7e17a3838d10a1758c945d1fd67 Addition of GIT files to XC branch
-----------------------------------------------------------------------

hooks/post-receive
--
Postgres-XC
From: Abbas B. <abb...@te...> - 2011-05-25 10:29:32
On Wed, May 25, 2011 at 3:27 PM, Andrei Martsinchyk <and...@gm...> wrote:
>
> 2011/5/25 Abbas Butt <abb...@te...>
>>
>> On Tue, May 24, 2011 at 10:18 PM, Andrei Martsinchyk <and...@gm...> wrote:
>>>
>>> Hi Abbas,
>>>
>>> I looked at the code and see that for some data types compute_hash()
>>> returns not a hash code but the original value:
>>>
>>> +        case INT8OID:
>>> +            /* This gives the added advantage that
>>> +             * a = 8446744073709551359
>>> +             * and a = 8446744073709551359::int8 both work */
>>> +            return DatumGetInt32(value);
>>> +        case INT2OID:
>>> +            return DatumGetInt16(value);
>>> +        case OIDOID:
>>> +            return DatumGetObjectId(value);
>>> +        case INT4OID:
>>> +            return DatumGetInt32(value);
>>> +        case BOOLOID:
>>> +            return DatumGetBool(value);
>>>
>>> That is not a critical error, and it gives slightly better calculation
>>> speed, but it may cause poor distributions when, for example, the
>>> distribution column contains only even or only odd values.
>>
>> That would happen only if the user chooses to use modulo distribution.
>> If the user knows his/her distribution column is not uniformly
>> distributed and wants rows to be distributed uniformly, a better choice
>> would be to use hash distribution, in which case a hash will be computed.
>
> Ok, I have just noticed the value returned is hashed using the existing
> algorithm. So for other data types the value is hashed twice?
>

If the user chose hash distribution then yes.

>>> Some nodes may have many rows while others may not have rows at all.
>>> I suggest using the hashintX functions here.
>>> And another point: Oids are generated on data nodes; does it make
>>> sense to allow hashing here, where the value is supposed to be coming
>>> from a coordinator?
>>
>> An Oid can be used like this, e.g.:
>>
>> CREATE TABLE abc(a oid) distribute by modulo(a);
>>
>> Oid is the only unsigned integer data type available to the user, and
>> if the user wants a table to have a column of type Oid and wants to
>> distribute by that column, then the provided option will be used.
>
> OK
>
>>> 2011/5/24 Abbas Butt <ga...@us...>
>>>>
>>>> Project "Postgres-XC".
>>>>
>>>> The branch, master has been updated
>>>>       via  49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit)
>>>>      from  87a62879ab3492e3dd37d00478ffa857639e2b85 (commit)
>>>>
>>>> - Log -----------------------------------------------------------------
>>>> commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae
>>>> Author: Abbas <abb...@en...>
>>>> Date:   Tue May 24 17:06:30 2011 +0500
>>>>
>>>>     This patch adds support for the following data types to be used as
>>>>     distribution key
>>>>
>>>>     INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR
>>>>     CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR
>>>>     FLOAT4, FLOAT8, NUMERIC, CASH
>>>>     ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, TIMETZ
>>>>
>>>>     A new function compute_hash is added in the system, which is used to
>>>>     compute the hash of any of the supported data types.
>>>>     The computed hash is used in the function GetRelationNodes to
>>>>     find the targeted data node.
>>>>
>>>>     EXPLAIN for RemoteQuery has been modified to show the number of
>>>>     data nodes targeted for a certain query. This is essential
>>>>     to spot bugs in the optimizer in case it is targeting all nodes
>>>>     by mistake.
>>>>
>>>>     In the case of optimisations where comparison with a constant leads
>>>>     the optimiser to point to a single data node, there were a couple
>>>>     of mistakes in examine_conditions_walker.
>>>>     First, it was not supporting RelabelType, which represents a "dummy"
>>>>     type coercion between two binary-compatible datatypes.
>>>>     This was resulting in the optimization not working for the varchar
>>>>     type, for example.
>>>>     Secondly, it was not catering for the case where the user specifies
>>>>     the condition such that the constant expression is written towards
>>>>     the LHS and the variable towards the RHS of the = operator,
>>>>     i.e. 23 = a
>>>>
>>>>     A number of test cases have been added in regression to make sure
>>>>     further enhancements do not break this functionality.
>>>>
>>>>     This change has a sizeable impact on current regression tests in the
>>>>     following manner.
>>>>
>>>>     1. The horology test case crashes the server and has been commented
>>>>        out in serial_schedule.
>>>>     2. In the money test case the planner optimizer wrongly kicks in to
>>>>        optimize this query
>>>>        SELECT m = '$123.01' FROM money_data;
>>>>        to point to a single data node.
>>>>     3. There were a few unnecessary EXPLAINs in the create_index test
>>>>        case. Since we have added support in EXPLAIN to show the number
>>>>        of data nodes targeted for RemoteQuery, this test case was
>>>>        producing output dependent on the cluster configuration.
>>>>     4. In the guc test case
>>>>        DROP ROLE temp_reset_user;
>>>>        results in
>>>>        ERROR: permission denied to drop role
>>>>
>>>> diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
>>>> index 577873b..22766c5 100644
>>>> --- a/src/backend/access/hash/hashfunc.c
>>>> +++ b/src/backend/access/hash/hashfunc.c
>>>> @@ -28,6 +28,13 @@
>>>>
>>>>  #include "access/hash.h"
>>>>
>>>> +#ifdef PGXC
>>>> +#include "catalog/pg_type.h"
>>>> +#include "utils/builtins.h"
>>>> +#include "utils/timestamp.h"
>>>> +#include "utils/date.h"
>>>> +#include "utils/nabstime.h"
>>>> +#endif
>>>>
>>>>  /* Note: this is used for both "char" and boolean datatypes */
>>>>  Datum
>>>> @@ -521,3 +528,91 @@ hash_uint32(uint32 k)
>>>>  	/* report the result */
>>>>  	return UInt32GetDatum(c);
>>>>  }
>>>> +
>>>> +#ifdef PGXC
>>>> +/*
>>>> + * compute_hash() -- Generaic hash function for all datatypes
>>>> + *
>>>> + */
>>>> +Datum
>>>> +compute_hash(Oid type, Datum value, int *pErr)
>>>> +{
>>>> +	Assert(pErr);
>>>> +
>>>> +	*pErr = 0;
>>>> +
>>>> +	if (value == NULL)
>>>> +	{
>>>> +		*pErr = 1;
>>>> +		return 0;
>>>> +	}
>>>> +
>>>> +	switch(type)
>>>> +	{
>>>> +		case INT8OID:
>>>> +			/* This gives added advantage that
>>>> +			 * a = 8446744073709551359
>>>> +			 * and a = 8446744073709551359::int8 both work*/
>>>> +			return DatumGetInt32(value);
>>>> +		case INT2OID:
>>>> +			return DatumGetInt16(value);
>>>> +		case OIDOID:
>>>> +			return DatumGetObjectId(value);
>>>> +		case INT4OID:
>>>> +			return DatumGetInt32(value);
>>>> +		case BOOLOID:
>>>> +			return DatumGetBool(value);
>>>> +
>>>> +		case CHAROID:
>>>> +			return DirectFunctionCall1(hashchar, value);
>>>> +		case NAMEOID:
>>>> +			return DirectFunctionCall1(hashname, value);
>>>> +		case INT2VECTOROID:
>>>> +			return DirectFunctionCall1(hashint2vector, value);
>>>> +
>>>> +		case VARCHAROID:
>>>> +		case TEXTOID:
>>>> +			return DirectFunctionCall1(hashtext, value);
>>>> +
>>>> +		case OIDVECTOROID:
>>>> +			return DirectFunctionCall1(hashoidvector, value);
>>>> +		case FLOAT4OID:
>>>> +			return DirectFunctionCall1(hashfloat4, value);
>>>> +		case FLOAT8OID:
>>>> +			return DirectFunctionCall1(hashfloat8, value);
>>>> +
>>>> +		case ABSTIMEOID:
>>>> +			return DatumGetAbsoluteTime(value);
>>>> +		case RELTIMEOID:
>>>> +			return DatumGetRelativeTime(value);
>>>> +		case CASHOID:
>>>> +			return DirectFunctionCall1(hashint8, value);
>>>> +
>>>> +		case BPCHAROID:
>>>> +			return DirectFunctionCall1(hashbpchar, value);
>>>> +		case BYTEAOID:
>>>> +			return DirectFunctionCall1(hashvarlena, value);
>>>> +
>>>> +		case DATEOID:
>>>> +			return DatumGetDateADT(value);
>>>> +		case TIMEOID:
>>>> +			return DirectFunctionCall1(time_hash, value);
>>>> +		case TIMESTAMPOID:
>>>> +			return DirectFunctionCall1(timestamp_hash, value);
>>>> +		case TIMESTAMPTZOID:
>>>> +			return DirectFunctionCall1(timestamp_hash, value);
>>>> +		case INTERVALOID:
>>>> +			return DirectFunctionCall1(interval_hash, value);
>>>> +		case TIMETZOID:
>>>> +			return DirectFunctionCall1(timetz_hash, value);
>>>> +
>>>> +		case NUMERICOID:
>>>> +			return DirectFunctionCall1(hash_numeric, value);
>>>> +		default:
>>>> +			*pErr = 1;
>>>> +			return 0;
>>>> +	}
>>>> +}
>>>> +
>>>> +#endif
>>>> diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
>>>> index 613d5ff..714190f 100644
>>>> --- a/src/backend/commands/copy.c
>>>> +++ b/src/backend/commands/copy.c
>>>> @@ -1645,14 +1645,14 @@ CopyTo(CopyState cstate)
>>>>  	}
>>>>
>>>>  #ifdef PGXC
>>>> -	if (IS_PGXC_COORDINATOR && cstate->rel_loc)
>>>> +	if (IS_PGXC_COORDINATOR && cstate->rel_loc)
>>>>  	{
>>>>  		cstate->processed = DataNodeCopyOut(
>>>> -				GetRelationNodes(cstate->rel_loc, NULL, RELATION_ACCESS_READ),
>>>> +				GetRelationNodes(cstate->rel_loc, 0, UNKNOWNOID, RELATION_ACCESS_READ),
>>>>  				cstate->connections,
>>>>  				cstate->copy_file);
>>>>  	}
>>>> -	else
>>>> +	else
>>>>  	{
>>>>  #endif
>>>>
>>>> @@ -2417,15 +2417,18 @@ CopyFrom(CopyState cstate)
>>>>  #ifdef PGXC
>>>>  	if (IS_PGXC_COORDINATOR && cstate->rel_loc)
>>>>  	{
>>>> -		Datum	*dist_col_value = NULL;
>>>> +		Datum	dist_col_value;
>>>> +		Oid		dist_col_type = UNKNOWNOID;
>>>>
>>>>  		if (cstate->idx_dist_by_col >= 0 && !nulls[cstate->idx_dist_by_col])
>>>> -			dist_col_value = &values[cstate->idx_dist_by_col];
>>>> +		{
>>>> +			dist_col_value = values[cstate->idx_dist_by_col];
>>>> +			dist_col_type = attr[cstate->idx_dist_by_col]->atttypid;
>>>> +		}
>>>>
>>>>  		if (DataNodeCopyIn(cstate->line_buf.data, cstate->line_buf.len,
>>>> -				GetRelationNodes(cstate->rel_loc, (long *)dist_col_value,
>>>> -								 RELATION_ACCESS_INSERT),
>>>> +				GetRelationNodes(cstate->rel_loc, dist_col_value, dist_col_type, RELATION_ACCESS_INSERT),
>>>>  				cstate->connections))
>>>>  			ereport(ERROR,
>>>>  					(errcode(ERRCODE_CONNECTION_EXCEPTION),
>>>> @@ -4037,7 +4040,8 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot *slot)
>>>>  	HeapTuple	tuple;
>>>>  	Datum	   *values;
>>>>  	bool	   *nulls;
>>>> -	Datum	   *dist_col_value = NULL;
>>>> +	Datum		dist_col_value;
>>>> +	Oid			dist_col_type;
>>>>  	MemoryContext oldcontext;
>>>>  	CopyState	cstate;
>>>>
>>>> @@ -4082,6 +4086,11 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot *slot)
>>>>  	cstate->fe_msgbuf = makeStringInfo();
>>>>  	attr = cstate->tupDesc->attrs;
>>>>
>>>> +	if (cstate->idx_dist_by_col >= 0)
>>>> +		dist_col_type = attr[cstate->idx_dist_by_col]->atttypid;
>>>> +	else
>>>> +		dist_col_type = UNKNOWNOID;
>>>> +
>>>>  	/* Get info about the columns we need to process. */
>>>>  	cstate->out_functions = (FmgrInfo *) palloc(cstate->tupDesc->natts * sizeof(FmgrInfo));
>>>>  	foreach(lc, cstate->attnumlist)
>>>> @@ -4152,12 +4161,14 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot *slot)
>>>>
>>>>  	/* Get dist column, if any */
>>>>  	if (cstate->idx_dist_by_col >= 0 && !nulls[cstate->idx_dist_by_col])
>>>> -		dist_col_value = &values[cstate->idx_dist_by_col];
>>>> +		dist_col_value = values[cstate->idx_dist_by_col];
>>>> +	else
>>>> +		dist_col_type = UNKNOWNOID;
>>>>
>>>>  	/* Send item to the appropriate data node(s) (buffer) */
>>>>  	if (DataNodeCopyIn(cstate->fe_msgbuf->data, cstate->fe_msgbuf->len,
>>>> -			GetRelationNodes(cstate->rel_loc, (long *)dist_col_value, RELATION_ACCESS_INSERT),
>>>> +			GetRelationNodes(cstate->rel_loc, dist_col_value, dist_col_type, RELATION_ACCESS_INSERT),
>>>>  			cstate->connections))
>>>>  		ereport(ERROR,
>>>>  				(errcode(ERRCODE_CONNECTION_EXCEPTION),
>>>> diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
>>>> index a361186..fe74569 100644
>>>> --- a/src/backend/commands/explain.c
>>>> +++ b/src/backend/commands/explain.c
>>>> @@ -851,8 +851,28 @@ ExplainNode(Plan *plan, PlanState *planstate,
>>>>  		case T_WorkTableScan:
>>>>  #ifdef PGXC
>>>>  		case T_RemoteQuery:
>>>> +			{
>>>> +				RemoteQuery *remote_query = (RemoteQuery *) plan;
>>>> +				int pnc, nc;
>>>> +
>>>> +				pnc = 0;
>>>> +				nc = 0;
>>>> +				if (remote_query->exec_nodes != NULL)
>>>> +				{
>>>> +					if (remote_query->exec_nodes->primarynodelist != NULL)
>>>> +					{
>>>> +						pnc = list_length(remote_query->exec_nodes->primarynodelist);
>>>> +						appendStringInfo(es->str, " (Primary Node Count [%d])", pnc);
>>>> +					}
>>>> +					if (remote_query->exec_nodes->nodelist)
>>>> +					{
>>>> +						nc = list_length(remote_query->exec_nodes->nodelist);
>>>> +						appendStringInfo(es->str, " (Node Count [%d])", nc);
>>>> +					}
>>>> +				}
>>>>  #endif
>>>> -			ExplainScanTarget((Scan *) plan, es);
>>>> +				ExplainScanTarget((Scan *) plan, es);
>>>> +			}
>>>>  			break;
>>>>  		case T_BitmapIndexScan:
>>>>  			{
>>>> diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
>>>> index b6252a3..c03938d 100644
>>>> --- a/src/backend/optimizer/plan/createplan.c
>>>> +++ b/src/backend/optimizer/plan/createplan.c
>>>> @@ -2418,9 +2418,7 @@ create_remotequery_plan(PlannerInfo *root, Path *best_path,
>>>>  		scan_plan->exec_nodes->baselocatortype = rel_loc_info->locatorType;
>>>>  	else
>>>>  		scan_plan->exec_nodes->baselocatortype = '\0';
>>>> -	scan_plan->exec_nodes = GetRelationNodes(rel_loc_info,
>>>> -											 NULL,
>>>> -											 RELATION_ACCESS_READ);
>>>> +	scan_plan->exec_nodes = GetRelationNodes(rel_loc_info, 0, UNKNOWNOID, RELATION_ACCESS_READ);
>>>>  	copy_path_costsize(&scan_plan->scan.plan, best_path);
>>>>
>>>>  	/* PGXCTODO - get better estimates */
>>>> @@ -5024,8 +5022,7 @@ create_remotedelete_plan(PlannerInfo *root, Plan *topplan)
>>>>  		fstep->sql_statement = pstrdup(buf->data);
>>>>  		fstep->combine_type = COMBINE_TYPE_SAME;
>>>>  		fstep->read_only = false;
>>>> -		fstep->exec_nodes = GetRelationNodes(rel_loc_info, NULL,
>>>> -											 RELATION_ACCESS_UPDATE);
>>>> +		fstep->exec_nodes = GetRelationNodes(rel_loc_info, 0, UNKNOWNOID, RELATION_ACCESS_UPDATE);
>>>>  	}
>>>>  	else
>>>>  	{
>>>> diff --git a/src/backend/pgxc/locator/locator.c b/src/backend/pgxc/locator/locator.c
>>>> index 0ab157d..33fe8ac 100644
>>>> --- a/src/backend/pgxc/locator/locator.c
>>>> +++ b/src/backend/pgxc/locator/locator.c
>>>> @@ -41,7 +41,7 @@
>>>>
>>>>  #include "catalog/pgxc_class.h"
>>>>  #include "catalog/namespace.h"
>>>> -
>>>> +#include "access/hash.h"
>>>>
>>>>  /*
>>>>   * PGXCTODO For prototype, relations use the same hash mapping table.
>>>> @@ -206,7 +206,32 @@ char *pColName;
>>>>  bool
>>>>  IsHashDistributable(Oid col_type)
>>>>  {
>>>> -	if (col_type == INT4OID || col_type == INT2OID)
>>>> +	if(col_type == INT8OID
>>>> +	|| col_type == INT2OID
>>>> +	|| col_type == OIDOID
>>>> +	|| col_type == INT4OID
>>>> +	|| col_type == BOOLOID
>>>> +	|| col_type == CHAROID
>>>> +	|| col_type == NAMEOID
>>>> +	|| col_type == INT2VECTOROID
>>>> +	|| col_type == TEXTOID
>>>> +	|| col_type == OIDVECTOROID
>>>> +	|| col_type == FLOAT4OID
>>>> +	|| col_type == FLOAT8OID
>>>> +	|| col_type == ABSTIMEOID
>>>> +	|| col_type == RELTIMEOID
>>>> +	|| col_type == CASHOID
>>>> +	|| col_type == BPCHAROID
>>>> +	|| col_type == BYTEAOID
>>>> +	|| col_type == VARCHAROID
>>>> +	|| col_type == DATEOID
>>>> +	|| col_type == TIMEOID
>>>> +	|| col_type == TIMESTAMPOID
>>>> +	|| col_type == TIMESTAMPTZOID
>>>> +	|| col_type == INTERVALOID
>>>> +	|| col_type == TIMETZOID
>>>> +	|| col_type == NUMERICOID
>>>> +	)
>>>>  		return true;
>>>>
>>>>  	return false;
>>>> @@ -296,7 +321,32 @@ RelationLocInfo *rel_loc_info;
>>>>  bool
>>>>  IsModuloDistributable(Oid col_type)
>>>>  {
>>>> -	if (col_type == INT4OID || col_type == INT2OID)
>>>> +	if(col_type == INT8OID
>>>> +	|| col_type == INT2OID
>>>> +	|| col_type == OIDOID
>>>> +	|| col_type == INT4OID
>>>> +	|| col_type == BOOLOID
>>>> +	|| col_type == CHAROID
>>>> +	|| col_type == NAMEOID
>>>> +	|| col_type == INT2VECTOROID
>>>> +	|| col_type == TEXTOID
>>>> +	|| col_type == OIDVECTOROID
>>>> +	|| col_type == FLOAT4OID
>>>> +	|| col_type == FLOAT8OID
>>>> +	|| col_type == ABSTIMEOID
>>>> +	|| col_type == RELTIMEOID
>>>> +	|| col_type == CASHOID
>>>> +	|| col_type == BPCHAROID
>>>> +	|| col_type == BYTEAOID
>>>> +	|| col_type == VARCHAROID
>>>> +	|| col_type == DATEOID
>>>> +	|| col_type == TIMEOID
>>>> +	|| col_type == TIMESTAMPOID
>>>> +	|| col_type == TIMESTAMPTZOID
>>>> +	|| col_type == INTERVALOID
>>>> +	|| col_type == TIMETZOID
>>>> +	|| col_type == NUMERICOID
>>>> +	)
>>>>  		return true;
>>>>
>>>>  	return false;
>>>> @@ -409,13 +459,13 @@ GetRoundRobinNode(Oid relid)
>>>>   * The returned List is a copy, so it should be freed when finished.
>>>>   */
>>>>  ExecNodes *
>>>> -GetRelationNodes(RelationLocInfo *rel_loc_info, long *partValue,
>>>> -				 RelationAccessType accessType)
>>>> +GetRelationNodes(RelationLocInfo *rel_loc_info, Datum valueForDistCol, Oid typeOfValueForDistCol, RelationAccessType accessType)
>>>>  {
>>>>  	ListCell   *prefItem;
>>>>  	ListCell   *stepItem;
>>>>  	ExecNodes  *exec_nodes;
>>>> -
>>>> +	long		hashValue;
>>>> +	int			nError;
>>>>
>>>>  	if (rel_loc_info == NULL)
>>>>  		return NULL;
>>>> @@ -480,10 +530,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, long *partValue,
>>>>  			break;
>>>>
>>>>  		case LOCATOR_TYPE_HASH:
>>>> -
>>>> -			if (partValue != NULL)
>>>> +			hashValue = compute_hash(typeOfValueForDistCol, valueForDistCol, &nError);
>>>> +			if (nError == 0)
>>>>  				/* in prototype, all partitioned tables use same map */
>>>> -				exec_nodes->nodelist = lappend_int(NULL, get_node_from_hash(hash_range_int(*partValue)));
>>>> +				exec_nodes->nodelist = lappend_int(NULL, get_node_from_hash(hash_range_int(hashValue)));
>>>>  			else
>>>>  				if (accessType == RELATION_ACCESS_INSERT)
>>>>  					/* Insert NULL to node 1 */
>>>> @@ -494,9 +544,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, long *partValue,
>>>>  			break;
>>>>
>>>>  		case LOCATOR_TYPE_MODULO:
>>>> -			if (partValue != NULL)
>>>> +			hashValue = compute_hash(typeOfValueForDistCol, valueForDistCol, &nError);
>>>> +			if (nError == 0)
>>>>  				/* in prototype, all partitioned tables use same map */
>>>> -				exec_nodes->nodelist = lappend_int(NULL, get_node_from_modulo(compute_modulo(*partValue)));
>>>> +				exec_nodes->nodelist = lappend_int(NULL, get_node_from_modulo(compute_modulo(hashValue)));
>>>>  			else
>>>>  				if (accessType == RELATION_ACCESS_INSERT)
>>>>  					/* Insert NULL to node 1 */
>>>> @@ -750,7 +801,6 @@ RelationLocInfo *
>>>>  GetRelationLocInfo(Oid relid)
>>>>  {
>>>>  	RelationLocInfo *ret_loc_info = NULL;
>>>> -	char *namespace;
>>>>
>>>>  	Relation rel = relation_open(relid, AccessShareLock);
>>>>
>>>> diff --git a/src/backend/pgxc/plan/planner.c b/src/backend/pgxc/plan/planner.c
>>>> index 2448a74..4873f19 100644
>>>> --- a/src/backend/pgxc/plan/planner.c
>>>> +++ b/src/backend/pgxc/plan/planner.c
>>>> @@ -43,20 +43,23 @@
>>>>  #include "utils/lsyscache.h"
>>>>  #include "utils/portal.h"
>>>>  #include "utils/syscache.h"
>>>> -
>>>> +#include "utils/numeric.h"
>>>> +#include "access/hash.h"
>>>> +#include "utils/timestamp.h"
>>>> +#include "utils/date.h"
>>>>
>>>>  /*
>>>>   * Convenient format for literal comparisons
>>>>   *
>>>> - * PGXCTODO - make constant type Datum, handle other types
>>>>   */
>>>>  typedef struct
>>>>  {
>>>> -	Oid			relid;
>>>> -	RelationLocInfo *rel_loc_info;
>>>> -	Oid			attrnum;
>>>> -	char	   *col_name;
>>>> -	long		constant;		/* assume long PGXCTODO - should be Datum */
>>>> +	Oid			relid;
>>>> +	RelationLocInfo *rel_loc_info;
>>>> +	Oid			attrnum;
>>>> +	char	   *col_name;
>>>> +	Datum		constValue;
>>>> +	Oid			constType;
>>>>  } Literal_Comparison;
>>>>
>>>>  /*
>>>> @@ -471,15 +474,12 @@ get_base_var(Var *var, XCWalkerContext *context)
>>>>  static void
>>>>  get_plan_nodes_insert(PlannerInfo *root, RemoteQuery *step)
>>>>  {
>>>> -	Query	   *query = root->parse;
>>>> -	RangeTblEntry *rte;
>>>> -	RelationLocInfo *rel_loc_info;
>>>> -	Const	   *constant;
>>>> -	ListCell   *lc;
>>>> -	long		part_value;
>>>> -	long	   *part_value_ptr = NULL;
>>>> -	Expr	   *eval_expr = NULL;
>>>> -
>>>> +	Query	   *query = root->parse;
>>>> +	RangeTblEntry *rte;
>>>> +	RelationLocInfo *rel_loc_info;
>>>> +	Const	   *constant;
>>>> +	ListCell   *lc;
>>>> +	Expr	   *eval_expr = NULL;
>>>>
>>>>  	step->exec_nodes = NULL;
>>>>
>>>> @@ -568,7 +568,7 @@ get_plan_nodes_insert(PlannerInfo *root, RemoteQuery *step)
>>>>  	if (!lc)
>>>>  	{
>>>>  		/* Skip rest, handle NULL */
>>>> -		step->exec_nodes = GetRelationNodes(rel_loc_info, NULL, RELATION_ACCESS_INSERT);
>>>> +		step->exec_nodes = GetRelationNodes(rel_loc_info, 0, UNKNOWNOID, RELATION_ACCESS_INSERT);
>>>>  		return;
>>>>  	}
>>>>
>>>> @@ -650,21 +650,11 @@ get_plan_nodes_insert(PlannerInfo *root, RemoteQuery *step)
>>>>  		}
>>>>
>>>>  		constant = (Const *) checkexpr;
>>>> -
>>>> -		if (constant->consttype == INT4OID ||
>>>> -			constant->consttype == INT2OID ||
>>>> -			constant->consttype == INT8OID)
>>>> -		{
>>>> -			part_value = (long) constant->constvalue;
>>>> -			part_value_ptr = &part_value;
>>>> -		}
>>>> -		/* PGXCTODO - handle other data types */
>>>>  	}
>>>>
>>>>  	/* single call handles both replicated and partitioned types */
>>>> -	step->exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr,
>>>> -										RELATION_ACCESS_INSERT);
>>>> +	step->exec_nodes = GetRelationNodes(rel_loc_info, constant->constvalue, constant->consttype, RELATION_ACCESS_INSERT);
>>>>
>>>>  	if (eval_expr)
>>>>  		pfree(eval_expr);
>>>> @@ -1047,6 +1037,28 @@ examine_conditions_walker(Node *expr_node, XCWalkerContext *context)
>>>>  		{
>>>>  			Expr	   *arg1 = linitial(opexpr->args);
>>>>  			Expr	   *arg2 = lsecond(opexpr->args);
>>>> +			RelabelType *rt;
>>>> +			Expr	   *targ;
>>>> +
>>>> +			if (IsA(arg1, RelabelType))
>>>> +			{
>>>> +				rt = arg1;
>>>> +				arg1 = rt->arg;
>>>> +			}
>>>> +
>>>> +			if (IsA(arg2, RelabelType))
>>>> +			{
>>>> +				rt = arg2;
>>>> +				arg2 = rt->arg;
>>>> +			}
>>>> +
>>>> +			/* Handle constant = var */
>>>> +			if (IsA(arg2, Var))
>>>> +			{
>>>> +				targ = arg1;
>>>> +				arg1 = arg2;
>>>> +				arg2 = targ;
>>>> +			}
>>>>
>>>>  			/* Look for a table */
>>>>  			if (IsA(arg1, Var))
>>>> @@ -1134,7 +1146,8 @@ examine_conditions_walker(Node *expr_node, XCWalkerContext *context)
>>>>  				lit_comp->relid = column_base->relid;
>>>>  				lit_comp->rel_loc_info = rel_loc_info1;
>>>>  				lit_comp->col_name = column_base->colname;
>>>> -				lit_comp->constant = constant->constvalue;
>>>> +				lit_comp->constValue = constant->constvalue;
>>>> +				lit_comp->constType = constant->consttype;
>>>>
>>>>  				context->conditions->partitioned_literal_comps = lappend(
>>>>  					context->conditions->partitioned_literal_comps,
>>>> @@ -1742,9 +1755,7 @@ get_plan_nodes_walker(Node *query_node, XCWalkerContext *context)
>>>>  		if (rel_loc_info->locatorType != LOCATOR_TYPE_HASH &&
>>>>  			rel_loc_info->locatorType != LOCATOR_TYPE_MODULO)
>>>>  			/* do not need to determine partitioning expression */
>>>> -			context->query_step->exec_nodes = GetRelationNodes(rel_loc_info,
>>>> -															   NULL,
>>>> -															   context->accessType);
>>>> +			context->query_step->exec_nodes = GetRelationNodes(rel_loc_info, 0, UNKNOWNOID, context->accessType);
>>>>
>>>>  		/* Note replicated table usage for determining safe queries */
>>>>  		if (context->query_step->exec_nodes)
>>>> @@ -1800,9 +1811,7 @@ get_plan_nodes_walker(Node *query_node, XCWalkerContext *context)
>>>>  		{
>>>>  			Literal_Comparison *lit_comp = (Literal_Comparison *) lfirst(lc);
>>>>
>>>> -			test_exec_nodes = GetRelationNodes(
>>>> -				lit_comp->rel_loc_info, &(lit_comp->constant),
>>>> -				RELATION_ACCESS_READ);
>>>> +			test_exec_nodes = GetRelationNodes(lit_comp->rel_loc_info, lit_comp->constValue, lit_comp->constType, RELATION_ACCESS_READ);
>>>>
>>>>  			test_exec_nodes->tableusagetype = table_usage_type;
>>>>  			if (context->query_step->exec_nodes == NULL)
>>>> @@ -1828,9 +1837,7 @@ get_plan_nodes_walker(Node *query_node, XCWalkerContext *context)
>>>>  		parent_child = (Parent_Child_Join *)
>>>>  			linitial(context->conditions->partitioned_parent_child);
>>>>
>>>> -		context->query_step->exec_nodes = GetRelationNodes(parent_child->rel_loc_info1,
>>>> -														   NULL,
>>>> -														   context->accessType);
>>>> +		context->query_step->exec_nodes = GetRelationNodes(parent_child->rel_loc_info1, 0, UNKNOWNOID, context->accessType);
>>>>  		context->query_step->exec_nodes->tableusagetype = table_usage_type;
>>>>  	}
>>>>
>>>> @@ -3378,8 +3385,6 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, ExecNodes **exec_nodes, const Ex
>>>>  	Expr	   *checkexpr;
>>>>  	Expr	   *eval_expr = NULL;
>>>>  	Const	   *constant;
>>>> -	long		part_value;
>>>> -	long	   *part_value_ptr = NULL;
>>>>
>>>>  	eval_expr = (Expr *) eval_const_expressions(NULL, (Node *)expr);
>>>>  	checkexpr = get_numeric_constant(eval_expr);
>>>> @@ -3389,17 +3394,8 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, ExecNodes **exec_nodes, const Ex
>>>>
>>>>  	constant = (Const *) checkexpr;
>>>>
>>>> -	if (constant->consttype == INT4OID ||
>>>> -		constant->consttype == INT2OID ||
>>>> -		constant->consttype == INT8OID)
>>>> -	{
>>>> -		part_value = (long) constant->constvalue;
>>>> -		part_value_ptr = &part_value;
>>>> -	}
>>>> -
>>>>  	/* single call handles both replicated and partitioned types */
>>>> -	*exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr,
>>>> -								   RELATION_ACCESS_INSERT);
>>>> +	*exec_nodes = GetRelationNodes(rel_loc_info, constant->constvalue, constant->consttype, RELATION_ACCESS_INSERT);
>>>>  	if (eval_expr)
>>>>  		pfree(eval_expr);
>>>>
>>>> diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c
>>>> index 75aca21..76e3eef 100644
>>>> --- a/src/backend/pgxc/pool/execRemote.c
>>>> +++ b/src/backend/pgxc/pool/execRemote.c
>>>> @@ -1061,7 +1061,8 @@ BufferConnection(PGXCNodeHandle *conn)
>>>>  	RemoteQueryState *combiner = conn->combiner;
>>>>  	MemoryContext oldcontext;
>>>>
>>>> -	Assert(conn->state == DN_CONNECTION_STATE_QUERY && combiner);
>>>> +	if (combiner == NULL || conn->state != DN_CONNECTION_STATE_QUERY)
>>>> +		return;
>>>>
>>>>  	/*
>>>>  	 * When BufferConnection is invoked CurrentContext is related to other
>>>> @@ -3076,9 +3077,8 @@ get_exec_connections(RemoteQueryState *planstate,
>>>>  				if (!isnull)
>>>>  				{
>>>>  					RelationLocInfo *rel_loc_info = GetRelationLocInfo(exec_nodes->relid);
>>>> -					ExecNodes *nodes = GetRelationNodes(rel_loc_info,
>>>> -														(long *) &partvalue,
>>>> -														exec_nodes->accesstype);
>>>> +					/* PGXCTODO what is the type of partvalue here */
>>>> +					ExecNodes *nodes = GetRelationNodes(rel_loc_info, partvalue, UNKNOWNOID, exec_nodes->accesstype);
>>>>  					if (nodes)
>>>>  					{
>>>>  						nodelist = nodes->nodelist;
>>>> diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
>>>> index 415fc47..6d7939b 100644
>>>> --- a/src/backend/tcop/postgres.c
>>>> +++ b/src/backend/tcop/postgres.c
>>>> @@ -670,18 +670,18 @@ pg_analyze_and_rewrite(Node *parsetree, const char *query_string,
>>>>  	querytree_list = pg_rewrite_query(query);
>>>>
>>>>  #ifdef PGXC
>>>> -	if (IS_PGXC_COORDINATOR && !IsConnFromCoord())
>>>> -	{
>>>> -		ListCell *lc;
>>>> -
>>>> -		foreach(lc, querytree_list)
>>>> -		{
>>>> -			Query *query = (Query *) lfirst(lc);
>>>> -
>>>> -			if (query->sql_statement == NULL)
>>>> -				query->sql_statement = pstrdup(query_string);
>>>> -		}
>>>> -	}
>>>> +	if (IS_PGXC_COORDINATOR && !IsConnFromCoord())
>>>> +	{
>>>> +		ListCell   *lc;
>>>> +
>>>> +		foreach(lc, querytree_list)
>>>> +		{
>>>> +			Query *query = (Query *) lfirst(lc);
>>>> +
>>>> +			if (query->sql_statement == NULL)
>>>> +				query->sql_statement = pstrdup(query_string);
>>>> +		}
>>>> +	}
>>>>  #endif
>>>>
>>>>  	TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string);
>>>> @@ -1043,7 +1043,7 @@ exec_simple_query(const char *query_string)
>>>>
>>>>  		querytree_list = pg_analyze_and_rewrite(parsetree, query_string,
>>>>  												NULL, 0);
>>>> -
>>>> +
>>>>  		plantree_list = pg_plan_queries(querytree_list, 0, NULL);
>>>>
>>>>  		/* Done with the snapshot used for parsing/planning */
>>>> diff --git a/src/include/access/hash.h b/src/include/access/hash.h
>>>> index d5899f4..4aaffaa 100644
>>>> --- a/src/include/access/hash.h
>>>> +++ b/src/include/access/hash.h
>>>> @@ -353,4 +353,8 @@ extern OffsetNumber _hash_binsearch_last(Page page, uint32 hash_value);
>>>>  extern void hash_redo(XLogRecPtr lsn, XLogRecord *record);
>>>>  extern void hash_desc(StringInfo buf, uint8 xl_info, char *rec);
>>>>
>>>> +#ifdef PGXC
>>>> +extern Datum compute_hash(Oid type, Datum value, int *pErr);
>>>> +#endif
>>>> +
>>>>  #endif	/* HASH_H */
>>>> diff --git a/src/include/pgxc/locator.h b/src/include/pgxc/locator.h
>>>> index 9f669d9..9ee983c 100644
>>>> --- a/src/include/pgxc/locator.h
>>>> +++ b/src/include/pgxc/locator.h
>>>> @@ -100,8 +100,7 @@ extern char ConvertToLocatorType(int disttype);
>>>>  extern char *GetRelationHashColumn(RelationLocInfo *rel_loc_info);
>>>>  extern RelationLocInfo *GetRelationLocInfo(Oid relid);
>>>>  extern RelationLocInfo *CopyRelationLocInfo(RelationLocInfo *src_info);
>>>> -extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, long *partValue,
>>>> -								   RelationAccessType accessType);
>>>> +extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, Datum valueForDistCol, Oid typeOfValueForDistCol, RelationAccessType accessType);
>>>>  extern bool IsHashColumn(RelationLocInfo *rel_loc_info, char *part_col_name);
>>>>  extern bool IsHashColumnForRelId(Oid relid, char *part_col_name);
>>>>  extern int GetRoundRobinNode(Oid relid);
>>>> diff --git a/src/test/regress/expected/create_index_1.out b/src/test/regress/expected/create_index_1.out
>>>> index 52fdc91..ab3807c 100644
>>>> --- a/src/test/regress/expected/create_index_1.out
>>>> +++ b/src/test/regress/expected/create_index_1.out
>>>> @@ -174,15 +174,10 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)';
>>>>  SET enable_seqscan = OFF;
>>>>  SET enable_indexscan = ON;
>>>>  SET enable_bitmapscan = ON;
>>>> -EXPLAIN (COSTS OFF)
>>>> -SELECT * FROM fast_emp4000
>>>> -    WHERE home_base @ '(200,200),(2000,1000)'::box
>>>> -    ORDER BY (home_base[0])[0];
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> -(1 row)
>>>> -
>>>> +--EXPLAIN (COSTS OFF)
>>>> +--SELECT * FROM fast_emp4000
>>>> +--    WHERE home_base @ '(200,200),(2000,1000)'::box
>>>> +--    ORDER BY (home_base[0])[0];
>>>>  SELECT * FROM fast_emp4000
>>>>      WHERE home_base @ '(200,200),(2000,1000)'::box
>>>>      ORDER BY (home_base[0])[0];
>>>> @@ -190,40 +185,25 @@ SELECT * FROM fast_emp4000
>>>>  -----------
>>>>  (0 rows)
>>>>
>>>> -EXPLAIN (COSTS OFF)
>>>> -SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box;
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> -(1 row)
>>>> -
>>>> +--EXPLAIN (COSTS OFF)
>>>> +--SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box;
>>>>  SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box;
>>>>  count
>>>> -------
>>>>      1
>>>>  (1 row)
>>>>
>>>> -EXPLAIN (COSTS OFF)
>>>> -SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> -(1 row)
>>>> -
>>>> +--EXPLAIN (COSTS OFF)
>>>> +--SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
>>>>  SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
>>>>  count
>>>> -------
>>>>    138
>>>>  (1 row)
>>>>
>>>> -EXPLAIN (COSTS OFF)
>>>> -SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon
>>>> -    ORDER BY (poly_center(f1))[0];
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> -(1 row)
>>>> -
>>>> +--EXPLAIN (COSTS OFF)
>>>> +--SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon
>>>> +--    ORDER BY (poly_center(f1))[0];
>>>>  SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon
>>>>      ORDER BY (poly_center(f1))[0];
>>>>  id |         f1
>>>> @@ -231,14 +211,9 @@ SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon
>>>>   1 | ((2,0),(2,4),(0,0))
>>>>  (1 row)
>>>>
>>>> -EXPLAIN (COSTS OFF)
>>>> -SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
>>>> -    ORDER BY area(f1);
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> -(1 row)
>>>> -
>>>> +--EXPLAIN (COSTS OFF)
>>>> +--SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
>>>> +--    ORDER BY area(f1);
>>>>  SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
>>>>      ORDER BY area(f1);
>>>>  f1
>>>> @@ -269,9 +244,9 @@ LINE 1: SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500...
>>>>                                                       ^
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)';
>>>> @@ -282,9 +257,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)';
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1;
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1;
>>>> @@ -295,9 +270,9 @@ SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1;
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)';
>>>> @@ -308,9 +283,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>';
>>>> @@ -321,9 +296,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>';
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)';
>>>> @@ -334,9 +309,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)';
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)';
>>>> @@ -347,9 +322,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)';
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)';
>>>> @@ -360,9 +335,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)';
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)';
>>>> @@ -373,9 +348,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)';
>>>>
>>>>  EXPLAIN (COSTS OFF)
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)';
>>>> -   QUERY PLAN
>>>> ----------------
>>>> - Data Node Scan
>>>> +           QUERY PLAN
>>>> +---------------------------------
>>>> + Data Node Scan (Node Count [1])
>>>>  (1 row)
>>>>
>>>>  SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)';
>>>> @@ -774,7 +749,7 @@ CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops);
>>>>  --
>>>>  CREATE TABLE func_index_heap (f1 text, f2 text);
>>>>  CREATE UNIQUE INDEX func_index_index on func_index_heap (textcat(f1,f2));
>>>> -ERROR: Cannot locally enforce a unique index on round robin distributed table.
>>>> +ERROR: Unique index of partitioned table must contain the hash/modulo distribution column.
>>>>  INSERT INTO func_index_heap VALUES('ABC','DEF');
>>>>  INSERT INTO func_index_heap VALUES('AB','CDEFG');
>>>>  INSERT INTO func_index_heap VALUES('QWE','RTY');
>>>> @@ -788,7 +763,7 @@ INSERT INTO func_index_heap VALUES('QWERTY');
>>>>  DROP TABLE func_index_heap;
>>>>  CREATE TABLE func_index_heap (f1 text, f2 text);
>>>>  CREATE UNIQUE INDEX func_index_index on func_index_heap ((f1 || f2) text_ops);
>>>> -ERROR: Cannot locally enforce a unique index on round robin distributed table.
>>>> +ERROR: Unique index of partitioned table must contain the hash/modulo distribution column.
>>>>  INSERT INTO func_index_heap VALUES('ABC','DEF');
>>>>  INSERT INTO func_index_heap VALUES('AB','CDEFG');
>>>>  INSERT INTO func_index_heap VALUES('QWE','RTY');
>>>> diff --git a/src/test/regress/expected/float4_1.out b/src/test/regress/expected/float4_1.out
>>>> index 432d159..f50147d 100644
>>>> --- a/src/test/regress/expected/float4_1.out
>>>> +++ b/src/test/regress/expected/float4_1.out
>>>> @@ -125,16 +125,6 @@ SELECT 'nan'::numeric::float4;
>>>>      NaN
>>>>  (1 row)
>>>>
>>>> -SELECT '' AS five, * FROM FLOAT4_TBL;
>>>> - five |     f1
>>>> -------+-------------
>>>> -      |      1004.3
>>>> -      | 1.23457e+20
>>>> -      |           0
>>>> -      |      -34.84
>>>> -      | 1.23457e-20
>>>> -(5 rows)
>>>> -
>>>>  SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1;
>>>>   five |     f1
>>>>  ------+-------------
>>>> @@ -257,13 +247,14 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f ORDER BY f1;
>>>>  UPDATE FLOAT4_TBL
>>>>     SET f1 = FLOAT4_TBL.f1 * '-1'
>>>>     WHERE FLOAT4_TBL.f1 > '0.0';
>>>> +ERROR: Partition column can't be updated in current version
>>>>  SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1;
>>>> - five |      f1
>>>> -------+--------------
>>>> -      | -1.23457e+20
>>>> -      | -1004.3
>>>> -      | -34.84
>>>> -      | -1.23457e-20
>>>> -      | 0
>>>> + five |     f1
>>>> +------+-------------
>>>> +      | -34.84
>>>> +      | 0
>>>> +      | 1.23457e-20
>>>> +      | 1004.3
>>>> +      | 1.23457e+20
>>>>  (5 rows)
>>>>
>>>> diff --git a/src/test/regress/expected/float8_1.out b/src/test/regress/expected/float8_1.out
>>>> index 65fe187..8ce7930 100644
>>>> --- a/src/test/regress/expected/float8_1.out
>>>> +++ b/src/test/regress/expected/float8_1.out
>>>> @@ -381,6 +381,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1;
>>>>  UPDATE FLOAT8_TBL
>>>>     SET f1 = FLOAT8_TBL.f1 * '-1'
>>>>     WHERE FLOAT8_TBL.f1 > '0.0';
>>>> +ERROR: Partition column can't be updated in current version
>>>>  SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1;
>>>>  ERROR: value out of range: overflow
>>>>  SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1;
>>>> @@ -396,17 +397,17 @@ ERROR: cannot take logarithm of zero
>>>>  SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0';
>>>>  ERROR: cannot take logarithm of a negative number
>>>>  SELECT '' AS bad, exp(f.f1) from FLOAT8_TBL f ORDER BY f1;
>>>> -ERROR: value out of range: underflow
>>>> +ERROR: value out of range: overflow
>>>>  SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f;
>>>>  ERROR: division by zero
>>>>  SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1;
>>>> - five |          f1
>>>> -------+-----------------------
>>>> -      | -1.2345678901234e+200
>>>> -      | -1004.3
>>>> -      | -34.84
>>>> -      | -1.2345678901234e-200
>>>> -      | 0
>>>> + five |          f1
>>>> +------+----------------------
>>>> +      | -34.84
>>>> +      | 0
>>>> +      | 1.2345678901234e-200
>>>> +      | 1004.3
>>>> +      | 1.2345678901234e+200
>>>>  (5 rows)
>>>>
>>>>  -- test for over- and underflow
>>>> diff --git a/src/test/regress/expected/foreign_key_1.out b/src/test/regress/expected/foreign_key_1.out
>>>> index 7eccdc6..3cb7d17 100644
>>>> --- a/src/test/regress/expected/foreign_key_1.out
>>>> +++ b/src/test/regress/expected/foreign_key_1.out
>>>> @@ -773,9 +773,9 @@ INSERT INTO FKTABLE VALUES(43);       -- should fail
>>>>  ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
>>>>  DETAIL: Key (ftest1)=(43) is not present in table "pktable".
>>>>  UPDATE FKTABLE SET ftest1 = ftest1;     -- should succeed
>>>> +ERROR: Partition column can't be updated in current version
>>>>  UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
>>>> -ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
>>>> -DETAIL: Key (ftest1)=(43) is not present in table "pktable".
>>>> +ERROR: Partition column can't be updated in current version
>>>>  DROP TABLE FKTABLE;
>>>>  -- This should fail, because we'd have to cast numeric to int which is
>>>>  -- not an implicit coercion (or use numeric=numeric, but that's not part
>>>> @@ -787,34 +787,22 @@ DROP TABLE PKTABLE;
>>>>  -- On the other hand, this should work because int implicitly promotes to
>>>>  -- numeric, and we allow promotion on the FK side
>>>>  CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY);
>>>> -ERROR: Column ptest1 is not a hash distributable data type
>>>> +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable"
>>>>  INSERT INTO PKTABLE VALUES(42);
>>>> -ERROR: relation "pktable" does not exist
>>>> -LINE 1: INSERT INTO PKTABLE VALUES(42);
>>>> -                    ^
>>>>  CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable);
>>>> -ERROR: relation "pktable" does not exist
>>>>  -- Check it actually works
>>>>  INSERT INTO FKTABLE VALUES(42);         -- should succeed
>>>> -ERROR: relation "fktable" does not exist
>>>> -LINE 1: INSERT INTO FKTABLE VALUES(42);
>>>> -                    ^
>>>> +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
>>>> +DETAIL: Key (ftest1)=(42) is not present in table "pktable".
>>>>  INSERT INTO FKTABLE VALUES(43);         -- should fail
>>>> -ERROR: relation "fktable" does not exist
>>>> -LINE 1: INSERT INTO FKTABLE VALUES(43);
>>>> -                    ^
>>>> +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
>>>> +DETAIL: Key (ftest1)=(43) is not present in table "pktable".
>>>>  UPDATE FKTABLE SET ftest1 = ftest1;     -- should succeed
>>>> -ERROR: relation "fktable" does not exist
>>>> -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1;
>>>> -               ^
>>>> +ERROR: Partition column can't be updated in current version
>>>>  UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
>>>> -ERROR: relation "fktable" does not exist
>>>> -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1 + 1;
>>>> -               ^
>>>> +ERROR: Partition column can't be updated in current version
>>>>  DROP TABLE FKTABLE;
>>>> -ERROR: table "fktable" does not exist
>>>>  DROP TABLE PKTABLE;
>>>> -ERROR: table "pktable" does not exist
>>>>  -- Two columns, two tables
>>>>  CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, ptest2));
>>>>  NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable"
>>>> diff --git a/src/test/regress/expected/money_1.out b/src/test/regress/expected/money_1.out
>>>> new file mode 100644
>>>> index 0000000..6a15792
>>>> --- /dev/null
>>>> +++ b/src/test/regress/expected/money_1.out
>>>> @@ -0,0 +1,186 @@
>>>> +--
>>>> +-- MONEY
>>>> +--
>>>> +CREATE TABLE money_data (m money);
>>>> +INSERT INTO money_data VALUES ('123');
>>>> +SELECT * FROM money_data;
>>>> +    m
>>>> +---------
>>>> + $123.00
>>>> +(1 row)
>>>> +
>>>> +SELECT m + '123' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> +  $246.00
>>>> +(1 row)
>>>> +
>>>> +SELECT m + '123.45' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> +  $246.45
>>>> +(1 row)
>>>> +
>>>> +SELECT m - '123.45' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> +   -$0.45
>>>> +(1 row)
>>>> +
>>>> +SELECT m * 2 FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> +  $246.00
>>>> +(1 row)
>>>> +
>>>> +SELECT m / 2 FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> +   $61.50
>>>> +(1 row)
>>>> +
>>>> +-- All true
>>>> +SELECT m = '$123.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + t
>>>> +(1 row)
>>>> +
>>>> +SELECT m != '$124.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + t
>>>> +(1 row)
>>>> +
>>>> +SELECT m <= '$123.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + t
>>>> +(1 row)
>>>> +
>>>> +SELECT m >= '$123.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + t
>>>> +(1 row)
>>>> +
>>>> +SELECT m < '$124.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + t
>>>> +(1 row)
>>>> +
>>>> +SELECT m > '$122.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + t
>>>> +(1 row)
>>>> +
>>>> +-- All false
>>>> +SELECT m = '$123.01' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> +(0 rows)
>>>> +
>>>> +SELECT m != '$123.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + f
>>>> +(1 row)
>>>> +
>>>> +SELECT m <= '$122.99' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + f
>>>> +(1 row)
>>>> +
>>>> +SELECT m >= '$123.01' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + f
>>>> +(1 row)
>>>> +
>>>> +SELECT m > '$124.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + f
>>>> +(1 row)
>>>> +
>>>> +SELECT m < '$122.00' FROM money_data;
>>>> + ?column?
>>>> +----------
>>>> + f
>>>> +(1 row)
>>>> +
>>>> +SELECT cashlarger(m, '$124.00') FROM money_data;
>>>> + cashlarger
>>>> +------------
>>>> +    $124.00
>>>> +(1 row)
>>>> +
>>>> +SELECT cashsmaller(m, '$124.00') FROM money_data;
>>>> + cashsmaller
>>>> +-------------
>>>> +     $123.00
>>>> +(1 row)
>>>> +
>>>> +SELECT cash_words(m) FROM money_data;
>>>> +                   cash_words
>>>> +-------------------------------------------------
>>>> + One hundred twenty three dollars and zero cents
>>>> +(1 row)
>>>> +
>>>> +SELECT cash_words(m + '1.23') FROM money_data;
>>>> +                       cash_words
>>>> +--------------------------------------------------------
>>>> + One hundred twenty four dollars and twenty three cents
>>>> +(1 row)
>>>> +
>>>> +DELETE FROM money_data;
>>>> +INSERT INTO money_data VALUES ('$123.45');
>>>> +SELECT * FROM money_data;
>>>> +    m
>>>> +---------
>>>> + $123.45
>>>> +(1 row)
>>>> +
>>>> +DELETE FROM money_data;
>>>> +INSERT INTO money_data VALUES ('$123.451');
>>>> +SELECT * FROM money_data;
>>>> +    m
>>>> +---------
>>>> + $123.45
>>>> +(1 row)
>>>> +
>>>> +DELETE FROM money_data;
>>>> +INSERT INTO money_data VALUES ('$123.454');
>>>> +SELECT * FROM money_data;
>>>> +    m
>>>> +---------
>>>> + $123.45
>>>> +(1 row)
>>>> +
>>>> +DELETE FROM money_data;
>>>> +INSERT INTO money_data VALUES ('$123.455');
>>>> +SELECT * FROM money_data;
>>>> +    m
>>>> +---------
>>>> + $123.46
>>>> +(1 row)
>>>> +
>>>> +DELETE FROM money_data;
>>>> +INSERT INTO money_data VALUES ('$123.456');
>>>> +SELECT * FROM money_data;
>>>> +    m
>>>> +---------
>>>> + $123.46
>>>> +(1 row)
>>>> +
>>>> +DELETE FROM money_data;
>>>> +INSERT INTO money_data VALUES ('$123.459');
>>>> +SELECT * FROM money_data;
>>>> +    m
>>>> +---------
>>>> + $123.46
>>>> +(1 row)
>>>> +
>>>> diff --git a/src/test/regress/expected/prepared_xacts_2.out b/src/test/regress/expected/prepared_xacts_2.out
>>>> index e456200..307ffad 100644
>>>> --- a/src/test/regress/expected/prepared_xacts_2.out
>>>> +++ b/src/test/regress/expected/prepared_xacts_2.out
>>>> @@ -6,7 +6,7 @@
>>>>  -- isn't really needed ... stopping and starting the postmaster would
>>>>  -- be enough, but we can't even do that here.
>>>>  -- create a simple table that we'll use in the tests
>>>> -CREATE TABLE pxtest1 (foobar VARCHAR(10));
>>>> +CREATE TABLE pxtest1 (foobar VARCHAR(10)) distribute by replication;
>>>>  INSERT INTO pxtest1 VALUES ('aaa');
>>>>  -- Test PREPARE TRANSACTION
>>>>  BEGIN;
>>>> diff --git a/src/test/regress/expected/reltime_1.out b/src/test/regress/expected/reltime_1.out
>>>> new file mode 100644
>>>> index 0000000..83f61f9
>>>> --- /dev/null
>>>> +++ b/src/test/regress/expected/reltime_1.out
>>>> @@ -0,0 +1,109 @@
>>>> +--
>>>> +-- RELTIME
>>>> +--
>>>> +CREATE TABLE RELTIME_TBL (f1 reltime);
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 1 minute');
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 5 hour');
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 10 day');
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 34 year');
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 3 months');
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 14 seconds ago');
>>>> +-- badly formatted reltimes
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltime');
>>>> +ERROR: invalid input syntax for type reltime: "badly formatted reltime"
>>>> +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltim...
>>>> +                                             ^
>>>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago');
>>>> +ERROR: invalid input syntax for type reltime: "@ 30 eons ago"
>>>> +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago');
>>>> +                                             ^
>>>> +-- test reltime operators
>>>> +SELECT '' AS six, * FROM RELTIME_TBL ORDER BY f1;
>>>> + six |      f1
>>>> +-----+---------------
>>>> +     | @ 14 secs ago
>>>> +     | @ 1 min
>>>> +     | @ 5 hours
>>>> +     | @ 10 days
>>>> +     | @ 3 mons
>>>> +     | @ 34 years
>>>> +(6 rows)
>>>> +
>>>> +SELECT '' AS five, * FROM RELTIME_TBL
>>>> +   WHERE RELTIME_TBL.f1 <> reltime '@ 10 days' ORDER BY f1;
>>>> + five |      f1
>>>> +------+---------------
>>>> +      | @ 14 secs ago
>>>> +      | @ 1 min
>>>> +      | @ 5 hours
>>>> +      | @ 3 mons
>>>> +      | @ 34 years
>>>> +(5 rows)
>>>> +
>>>> +SELECT '' AS three, * FROM RELTIME_TBL
>>>> +   WHERE RELTIME_TBL.f1 <= reltime '@ 5 hours' ORDER BY f1;
>>>> + three |      f1
>>>> +-------+---------------
>>>> +       | @ 14 secs ago
>>>> +       | @ 1 min
>>>> +       | @ 5 hours
>>>> +(3 rows)
>>>> +
>>>> +SELECT '' AS three, * FROM RELTIME_TBL
>>>> +   WHERE RELTIME_TBL.f1 < reltime '@ 1 day' ORDER BY f1;
>>>> + three |      f1
>>>> +-------+---------------
>>>> +       | @ 14 secs ago
>>>> +       | @ 1 min
>>>> +       | @ 5 hours
>>>> +(3 rows)
>>>> +
>>>> +SELECT '' AS one, * FROM RELTIME_TBL
>>>> +   WHERE RELTIME_TBL.f1 = reltime '@ 34 years' ORDER BY f1;
>>>> + one |    f1
>>>> +-----+----------
>>>> +     | 34 years
>>>> +(1 row)
>>>> +
>>>> +SELECT '' AS two, * FROM RELTIME_TBL
>>>> +   WHERE RELTIME_TBL.f1 >= reltime '@ 1 month' ORDER BY f1;
>>>> + two |     f1
>>>> +-----+------------
>>>> +     | @ 3 mons
>>>> +     | @ 34 years
>>>> +(2 rows)
>>>> +
>>>> +SELECT '' AS five, * FROM RELTIME_TBL
>>>> +   WHERE RELTIME_TBL.f1 > reltime '@ 3 seconds ago' ORDER BY f1;
>>>> + five |     f1
>>>> +------+------------
>>>> +      | @ 1 min
>>>> +      | @ 5 hours
>>>> +      | @ 10 days
>>>> +      | @ 3 mons
>>>> +      | @ 34 years
>>>> +(5 rows)
>>>> +
>>>> +SELECT '' AS fifteen, r1.*, r2.*
>>>> +   FROM RELTIME_TBL r1, RELTIME_TBL r2
>>>> +   WHERE r1.f1 > r2.f1
>>>> +   ORDER BY r1.f1, r2.f1;
>>>> + fifteen |     f1     |      f1
>>>> +---------+------------+---------------
>>>> +         | @ 1 min    | @ 14 secs ago
>>>> +         | @ 5 hours  | @ 14 secs ago
>>>> +         | @ 5 hours  | @ 1 min
>>>> +         | @ 10 days  | @ 14 secs ago
>>>> +         | @ 10 days  | @ 1 min
>>>> +         | @ 10 days  | @ 5 hours
>>>> +         | @ 3 mons   | @ 14 secs ago
>>>> +         | @ 3 mons   | @ 1 min
>>>> ...
[truncated message content]
From: Andrei M. <and...@gm...> - 2011-05-25 10:28:00
|
2011/5/25 Abbas Butt <abb...@te...> > > > On Tue, May 24, 2011 at 10:18 PM, Andrei Martsinchyk < > and...@gm...> wrote: > >> Hi Abbas, >> >> I looked at the code and see that for some data types compute_hash() >> returns not a hash code but the original value: >> >> + case INT8OID: >> >> + /* This gives added advantage that >> + * a = 8446744073709551359 >> + * and a = 8446744073709551359::int8 both work*/ >> + return DatumGetInt32(value); >> + case INT2OID: >> + return DatumGetInt16(value); >> + case OIDOID: >> + return DatumGetObjectId(value); >> + case INT4OID: >> + return DatumGetInt32(value); >> + case BOOLOID: >> + return DatumGetBool(value); >> >> That is not a critical error, and it gives slightly better calculation speed, but it may >> cause poor distribution when, for example, the distribution column contains >> only even or only odd values. >> > > That would happen only if the user chooses modulo distribution. > If the user knows his/her distribution column is not uniformly distributed and wants > rows to be distributed uniformly, a better choice would be to use > hash distribution, in which case a hash will be computed. > > OK, I have just noticed the value returned is hashed using the existing algorithm. So for other data types the value is hashed twice? > Some nodes may have many rows while others may have no rows at all. I >> suggest using the hashintX functions here. >> And another point: Oids are generated on data nodes; does it make sense >> to allow hashing here, where the value is supposed to be coming from the >> coordinator? >> > > Oid can be used like this, e.g. > > CREATE TABLE abc(a oid) distribute by modulo(a); > > Oid is the only unsigned integer data type available to the user, and if > the user wants a table to have a column of type Oid and wants to distribute > by that column, then the provided option will be used. > > OK > >> >> 2011/5/24 Abbas Butt <ga...@us...> >> >>> Project "Postgres-XC". >>> >>> The branch, master has been updated >>> via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) >>> from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) >>> >>> >>> - Log ----------------------------------------------------------------- >>> commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae >>> Author: Abbas <abb...@en...> >>> Date: Tue May 24 17:06:30 2011 +0500 >>> >>> This patch adds support for the following data types to be used as a >>> distribution key: >>> >>> INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR >>> CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR >>> FLOAT4, FLOAT8, NUMERIC, CASH >>> ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, TIMETZ >>> >>> A new function, compute_hash, is added to the system; it is used to >>> compute the hash of any of the supported data types. >>> The computed hash is used in the function GetRelationNodes to >>> find the targeted data node. >>> >>> EXPLAIN for RemoteQuery has been modified to show the number of >>> data nodes targeted by a certain query. This is essential >>> to spot bugs in the optimizer in case it is targeting all nodes >>> by mistake. >>> >>> In the case of optimisations where a comparison with a constant leads >>> the optimiser to point to a single data node, there were a couple >>> of mistakes in examine_conditions_walker. >>> First, it was not supporting RelabelType, which represents a "dummy" >>> type coercion between two binary-compatible data types. >>> This was resulting in the optimization not working for the varchar >>> type, for example.
>>> Secondly it was not catering for the case where the user specifies the >>> condition such that the constant expression is written towards LHS and >>> the >>> variable towards the RHS of the = operator. >>> i.e. 23 = a >>> >>> A number of test cases have been added in regression to make sure >>> further enhancements do not break this functionality. >>> >>> This change has a sizeable impact on current regression tests in the >>> following manner. >>> >>> 1. horology test case crashes the server and has been commented out in >>> serial_schedule. >>> 2. In money test case the planner optimizer wrongly kicks in to >>> optimize this query >>> SELECT m = '$123.01' FROM money_data; >>> to point to a single data node. >>> 3. There were a few un-necessary EXPLAINs in create_index test case. >>> Since we have added support in EXPLAIN to show the number of >>> data nodes targeted for RemoteQuery, this test case was producing >>> output dependent on the cluster configuration. >>> 4. In guc test case >>> DROP ROLE temp_reset_user; >>> results in >>> ERROR: permission denied to drop role >>> >>> diff --git a/src/backend/access/hash/hashfunc.c >>> b/src/backend/access/hash/hashfunc.c >>> index 577873b..22766c5 100644 >>> --- a/src/backend/access/hash/hashfunc.c >>> +++ b/src/backend/access/hash/hashfunc.c >>> @@ -28,6 +28,13 @@ >>> >>> #include "access/hash.h" >>> >>> +#ifdef PGXC >>> +#include "catalog/pg_type.h" >>> +#include "utils/builtins.h" >>> +#include "utils/timestamp.h" >>> +#include "utils/date.h" >>> +#include "utils/nabstime.h" >>> +#endif >>> >>> /* Note: this is used for both "char" and boolean datatypes */ >>> Datum >>> @@ -521,3 +528,91 @@ hash_uint32(uint32 k) >>> /* report the result */ >>> return UInt32GetDatum(c); >>> } >>> + >>> +#ifdef PGXC >>> +/* >>> + * compute_hash() -- Generaic hash function for all datatypes >>> + * >>> + */ >>> + >>> +Datum >>> +compute_hash(Oid type, Datum value, int *pErr) >>> +{ >>> + Assert(pErr); >>> + >>> + *pErr = 0; >>> + >>> + if (value == NULL) >>> + { >>> + *pErr = 1; >>> + return 0; >>> + } >>> + >>> + switch(type) >>> + { >>> + case INT8OID: >>> + /* This gives added advantage that >>> + * a = 8446744073709551359 >>> + * and a = 8446744073709551359::int8 both work*/ >>> + return DatumGetInt32(value); >>> + case INT2OID: >>> + return DatumGetInt16(value); >>> + case OIDOID: >>> + return DatumGetObjectId(value); >>> + case INT4OID: >>> + return DatumGetInt32(value); >>> + case BOOLOID: >>> + return DatumGetBool(value); >>> + >>> + case CHAROID: >>> + return DirectFunctionCall1(hashchar, value); >>> + case NAMEOID: >>> + return DirectFunctionCall1(hashname, value); >>> + case INT2VECTOROID: >>> + return DirectFunctionCall1(hashint2vector, >>> value); >>> + >>> + case VARCHAROID: >>> + case TEXTOID: >>> + return DirectFunctionCall1(hashtext, value); >>> + >>> + case OIDVECTOROID: >>> + return DirectFunctionCall1(hashoidvector, value); >>> + case FLOAT4OID: >>> + return DirectFunctionCall1(hashfloat4, value); >>> + case FLOAT8OID: >>> + return DirectFunctionCall1(hashfloat8, value); >>> + >>> + case ABSTIMEOID: >>> + return DatumGetAbsoluteTime(value); >>> + case RELTIMEOID: >>> + return DatumGetRelativeTime(value); >>> + case CASHOID: >>> + return DirectFunctionCall1(hashint8, value); >>> + >>> + case BPCHAROID: >>> + return DirectFunctionCall1(hashbpchar, value); >>> + case BYTEAOID: >>> + return DirectFunctionCall1(hashvarlena, value); >>> + >>> + case DATEOID: >>> + return DatumGetDateADT(value); >>> + case TIMEOID: >>> + return 
DirectFunctionCall1(time_hash, value); >>> + case TIMESTAMPOID: >>> + return DirectFunctionCall1(timestamp_hash, >>> value); >>> + case TIMESTAMPTZOID: >>> + return DirectFunctionCall1(timestamp_hash, >>> value); >>> + case INTERVALOID: >>> + return DirectFunctionCall1(interval_hash, value); >>> + case TIMETZOID: >>> + return DirectFunctionCall1(timetz_hash, value); >>> + >>> + case NUMERICOID: >>> + return DirectFunctionCall1(hash_numeric, value); >>> + default: >>> + *pErr = 1; >>> + return 0; >>> + } >>> +} >>> + >>> +#endif >>> diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c >>> index 613d5ff..714190f 100644 >>> --- a/src/backend/commands/copy.c >>> +++ b/src/backend/commands/copy.c >>> @@ -1645,14 +1645,14 @@ CopyTo(CopyState cstate) >>> } >>> >>> #ifdef PGXC >>> - if (IS_PGXC_COORDINATOR && cstate->rel_loc) >>> + if (IS_PGXC_COORDINATOR && cstate->rel_loc) >>> { >>> cstate->processed = DataNodeCopyOut( >>> - GetRelationNodes(cstate->rel_loc, NULL, >>> RELATION_ACCESS_READ), >>> + GetRelationNodes(cstate->rel_loc, 0, >>> UNKNOWNOID, RELATION_ACCESS_READ), >>> cstate->connections, >>> cstate->copy_file); >>> } >>> - else >>> + else >>> { >>> #endif >>> >>> @@ -2417,15 +2417,18 @@ CopyFrom(CopyState cstate) >>> #ifdef PGXC >>> if (IS_PGXC_COORDINATOR && cstate->rel_loc) >>> { >>> - Datum *dist_col_value = NULL; >>> + Datum dist_col_value; >>> + Oid dist_col_type = UNKNOWNOID; >>> >>> if (cstate->idx_dist_by_col >= 0 && >>> !nulls[cstate->idx_dist_by_col]) >>> - dist_col_value = >>> &values[cstate->idx_dist_by_col]; >>> + { >>> + dist_col_value = >>> values[cstate->idx_dist_by_col]; >>> + dist_col_type = >>> attr[cstate->idx_dist_by_col]->atttypid; >>> + } >>> >>> if (DataNodeCopyIn(cstate->line_buf.data, >>> cstate->line_buf.len, >>> - >>> GetRelationNodes(cstate->rel_loc, (long *)dist_col_value, >>> - >>> RELATION_ACCESS_INSERT), >>> + >>> GetRelationNodes(cstate->rel_loc, dist_col_value, dist_col_type, >>> RELATION_ACCESS_INSERT), >>> cstate->connections)) >>> ereport(ERROR, >>> >>> (errcode(ERRCODE_CONNECTION_EXCEPTION), >>> @@ -4037,7 +4040,8 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot >>> *slot) >>> HeapTuple tuple; >>> Datum *values; >>> bool *nulls; >>> - Datum *dist_col_value = NULL; >>> + Datum dist_col_value; >>> + Oid dist_col_type; >>> MemoryContext oldcontext; >>> CopyState cstate; >>> >>> @@ -4082,6 +4086,11 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot >>> *slot) >>> cstate->fe_msgbuf = makeStringInfo(); >>> attr = cstate->tupDesc->attrs; >>> >>> + if (cstate->idx_dist_by_col >= 0) >>> + dist_col_type = >>> attr[cstate->idx_dist_by_col]->atttypid; >>> + else >>> + dist_col_type = UNKNOWNOID; >>> + >>> /* Get info about the columns we need to process. 
*/ >>> cstate->out_functions = (FmgrInfo *) >>> palloc(cstate->tupDesc->natts * sizeof(FmgrInfo)); >>> foreach(lc, cstate->attnumlist) >>> @@ -4152,12 +4161,14 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot >>> *slot) >>> >>> /* Get dist column, if any */ >>> if (cstate->idx_dist_by_col >= 0 && >>> !nulls[cstate->idx_dist_by_col]) >>> - dist_col_value = &values[cstate->idx_dist_by_col]; >>> + dist_col_value = values[cstate->idx_dist_by_col]; >>> + else >>> + dist_col_type = UNKNOWNOID; >>> >>> /* Send item to the appropriate data node(s) (buffer) */ >>> if (DataNodeCopyIn(cstate->fe_msgbuf->data, >>> cstate->fe_msgbuf->len, >>> - GetRelationNodes(cstate->rel_loc, >>> (long *)dist_col_value, RELATION_ACCESS_INSERT), >>> + GetRelationNodes(cstate->rel_loc, >>> dist_col_value, dist_col_type, RELATION_ACCESS_INSERT), >>> cstate->connections)) >>> ereport(ERROR, >>> (errcode(ERRCODE_CONNECTION_EXCEPTION), >>> diff --git a/src/backend/commands/explain.c >>> b/src/backend/commands/explain.c >>> index a361186..fe74569 100644 >>> --- a/src/backend/commands/explain.c >>> +++ b/src/backend/commands/explain.c >>> @@ -851,8 +851,28 @@ ExplainNode(Plan *plan, PlanState *planstate, >>> case T_WorkTableScan: >>> #ifdef PGXC >>> case T_RemoteQuery: >>> + { >>> + RemoteQuery *remote_query = (RemoteQuery >>> *) plan; >>> + int pnc, nc; >>> + >>> + pnc = 0; >>> + nc = 0; >>> + if (remote_query->exec_nodes != NULL) >>> + { >>> + if >>> (remote_query->exec_nodes->primarynodelist != NULL) >>> + { >>> + pnc = >>> list_length(remote_query->exec_nodes->primarynodelist); >>> + appendStringInfo(es->str, >>> " (Primary Node Count [%d])", pnc); >>> + } >>> + if >>> (remote_query->exec_nodes->nodelist) >>> + { >>> + nc = >>> list_length(remote_query->exec_nodes->nodelist); >>> + appendStringInfo(es->str, >>> " (Node Count [%d])", nc); >>> + } >>> + } >>> #endif >>> - ExplainScanTarget((Scan *) plan, es); >>> + ExplainScanTarget((Scan *) plan, es); >>> + } >>> break; >>> case T_BitmapIndexScan: >>> { >>> diff --git a/src/backend/optimizer/plan/createplan.c >>> b/src/backend/optimizer/plan/createplan.c >>> index b6252a3..c03938d 100644 >>> --- a/src/backend/optimizer/plan/createplan.c >>> +++ b/src/backend/optimizer/plan/createplan.c >>> @@ -2418,9 +2418,7 @@ create_remotequery_plan(PlannerInfo *root, Path >>> *best_path, >>> scan_plan->exec_nodes->baselocatortype = >>> rel_loc_info->locatorType; >>> else >>> scan_plan->exec_nodes->baselocatortype = '\0'; >>> - scan_plan->exec_nodes = GetRelationNodes(rel_loc_info, >>> - >>> NULL, >>> - >>> RELATION_ACCESS_READ); >>> + scan_plan->exec_nodes = GetRelationNodes(rel_loc_info, 0, >>> UNKNOWNOID, RELATION_ACCESS_READ); >>> copy_path_costsize(&scan_plan->scan.plan, best_path); >>> >>> /* PGXCTODO - get better estimates */ >>> @@ -5024,8 +5022,7 @@ create_remotedelete_plan(PlannerInfo *root, Plan >>> *topplan) >>> fstep->sql_statement = pstrdup(buf->data); >>> fstep->combine_type = COMBINE_TYPE_SAME; >>> fstep->read_only = false; >>> - fstep->exec_nodes = GetRelationNodes(rel_loc_info, NULL, >>> - >>> RELATION_ACCESS_UPDATE); >>> + fstep->exec_nodes = GetRelationNodes(rel_loc_info, 0, >>> UNKNOWNOID, RELATION_ACCESS_UPDATE); >>> } >>> else >>> { >>> diff --git a/src/backend/pgxc/locator/locator.c >>> b/src/backend/pgxc/locator/locator.c >>> index 0ab157d..33fe8ac 100644 >>> --- a/src/backend/pgxc/locator/locator.c >>> +++ b/src/backend/pgxc/locator/locator.c >>> @@ -41,7 +41,7 @@ >>> >>> #include "catalog/pgxc_class.h" >>> #include "catalog/namespace.h" >>> - >>> +#include 
"access/hash.h" >>> >>> /* >>> * PGXCTODO For prototype, relations use the same hash mapping table. >>> @@ -206,7 +206,32 @@ char *pColName; >>> bool >>> IsHashDistributable(Oid col_type) >>> { >>> - if (col_type == INT4OID || col_type == INT2OID) >>> + if(col_type == INT8OID >>> + || col_type == INT2OID >>> + || col_type == OIDOID >>> + || col_type == INT4OID >>> + || col_type == BOOLOID >>> + || col_type == CHAROID >>> + || col_type == NAMEOID >>> + || col_type == INT2VECTOROID >>> + || col_type == TEXTOID >>> + || col_type == OIDVECTOROID >>> + || col_type == FLOAT4OID >>> + || col_type == FLOAT8OID >>> + || col_type == ABSTIMEOID >>> + || col_type == RELTIMEOID >>> + || col_type == CASHOID >>> + || col_type == BPCHAROID >>> + || col_type == BYTEAOID >>> + || col_type == VARCHAROID >>> + || col_type == DATEOID >>> + || col_type == TIMEOID >>> + || col_type == TIMESTAMPOID >>> + || col_type == TIMESTAMPTZOID >>> + || col_type == INTERVALOID >>> + || col_type == TIMETZOID >>> + || col_type == NUMERICOID >>> + ) >>> return true; >>> >>> return false; >>> @@ -296,7 +321,32 @@ RelationLocInfo *rel_loc_info; >>> bool >>> IsModuloDistributable(Oid col_type) >>> { >>> - if (col_type == INT4OID || col_type == INT2OID) >>> + if(col_type == INT8OID >>> + || col_type == INT2OID >>> + || col_type == OIDOID >>> + || col_type == INT4OID >>> + || col_type == BOOLOID >>> + || col_type == CHAROID >>> + || col_type == NAMEOID >>> + || col_type == INT2VECTOROID >>> + || col_type == TEXTOID >>> + || col_type == OIDVECTOROID >>> + || col_type == FLOAT4OID >>> + || col_type == FLOAT8OID >>> + || col_type == ABSTIMEOID >>> + || col_type == RELTIMEOID >>> + || col_type == CASHOID >>> + || col_type == BPCHAROID >>> + || col_type == BYTEAOID >>> + || col_type == VARCHAROID >>> + || col_type == DATEOID >>> + || col_type == TIMEOID >>> + || col_type == TIMESTAMPOID >>> + || col_type == TIMESTAMPTZOID >>> + || col_type == INTERVALOID >>> + || col_type == TIMETZOID >>> + || col_type == NUMERICOID >>> + ) >>> return true; >>> >>> return false; >>> @@ -409,13 +459,13 @@ GetRoundRobinNode(Oid relid) >>> * The returned List is a copy, so it should be freed when finished. 
>>> */ >>> ExecNodes * >>> -GetRelationNodes(RelationLocInfo *rel_loc_info, long *partValue, >>> - RelationAccessType accessType) >>> +GetRelationNodes(RelationLocInfo *rel_loc_info, Datum valueForDistCol, >>> Oid typeOfValueForDistCol, RelationAccessType accessType) >>> { >>> ListCell *prefItem; >>> ListCell *stepItem; >>> ExecNodes *exec_nodes; >>> - >>> + long hashValue; >>> + int nError; >>> >>> if (rel_loc_info == NULL) >>> return NULL; >>> @@ -480,10 +530,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, >>> long *partValue, >>> break; >>> >>> case LOCATOR_TYPE_HASH: >>> - >>> - if (partValue != NULL) >>> + hashValue = compute_hash(typeOfValueForDistCol, >>> valueForDistCol, &nError); >>> + if (nError == 0) >>> /* in prototype, all partitioned tables >>> use same map */ >>> - exec_nodes->nodelist = lappend_int(NULL, >>> get_node_from_hash(hash_range_int(*partValue))); >>> + exec_nodes->nodelist = lappend_int(NULL, >>> get_node_from_hash(hash_range_int(hashValue))); >>> else >>> if (accessType == RELATION_ACCESS_INSERT) >>> /* Insert NULL to node 1 */ >>> @@ -494,9 +544,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, long >>> *partValue, >>> break; >>> >>> case LOCATOR_TYPE_MODULO: >>> - if (partValue != NULL) >>> + hashValue = compute_hash(typeOfValueForDistCol, >>> valueForDistCol, &nError); >>> + if (nError == 0) >>> /* in prototype, all partitioned tables >>> use same map */ >>> - exec_nodes->nodelist = lappend_int(NULL, >>> get_node_from_modulo(compute_modulo(*partValue))); >>> + exec_nodes->nodelist = lappend_int(NULL, >>> get_node_from_modulo(compute_modulo(hashValue))); >>> else >>> if (accessType == RELATION_ACCESS_INSERT) >>> /* Insert NULL to node 1 */ >>> @@ -750,7 +801,6 @@ RelationLocInfo * >>> GetRelationLocInfo(Oid relid) >>> { >>> RelationLocInfo *ret_loc_info = NULL; >>> - char *namespace; >>> >>> Relation rel = relation_open(relid, AccessShareLock); >>> >>> diff --git a/src/backend/pgxc/plan/planner.c >>> b/src/backend/pgxc/plan/planner.c >>> index 2448a74..4873f19 100644 >>> --- a/src/backend/pgxc/plan/planner.c >>> +++ b/src/backend/pgxc/plan/planner.c >>> @@ -43,20 +43,23 @@ >>> #include "utils/lsyscache.h" >>> #include "utils/portal.h" >>> #include "utils/syscache.h" >>> - >>> +#include "utils/numeric.h" >>> +#include "access/hash.h" >>> +#include "utils/timestamp.h" >>> +#include "utils/date.h" >>> >>> /* >>> * Convenient format for literal comparisons >>> * >>> - * PGXCTODO - make constant type Datum, handle other types >>> */ >>> typedef struct >>> { >>> - Oid relid; >>> - RelationLocInfo *rel_loc_info; >>> - Oid attrnum; >>> - char *col_name; >>> - long constant; /* assume long PGXCTODO - >>> should be Datum */ >>> + Oid relid; >>> + RelationLocInfo *rel_loc_info; >>> + Oid attrnum; >>> + char *col_name; >>> + Datum constValue; >>> + Oid constType; >>> } Literal_Comparison; >>> >>> /* >>> @@ -471,15 +474,12 @@ get_base_var(Var *var, XCWalkerContext *context) >>> static void >>> get_plan_nodes_insert(PlannerInfo *root, RemoteQuery *step) >>> { >>> - Query *query = root->parse; >>> - RangeTblEntry *rte; >>> - RelationLocInfo *rel_loc_info; >>> - Const *constant; >>> - ListCell *lc; >>> - long part_value; >>> - long *part_value_ptr = NULL; >>> - Expr *eval_expr = NULL; >>> - >>> + Query *query = root->parse; >>> + RangeTblEntry *rte; >>> + RelationLocInfo *rel_loc_info; >>> + Const *constant; >>> + ListCell *lc; >>> + Expr *eval_expr = NULL; >>> >>> step->exec_nodes = NULL; >>> >>> @@ -568,7 +568,7 @@ get_plan_nodes_insert(PlannerInfo *root, 
RemoteQuery >>> *step) >>> if (!lc) >>> { >>> /* Skip rest, handle NULL */ >>> - step->exec_nodes = GetRelationNodes(rel_loc_info, >>> NULL, RELATION_ACCESS_INSERT); >>> + step->exec_nodes = GetRelationNodes(rel_loc_info, >>> 0, UNKNOWNOID, RELATION_ACCESS_INSERT); >>> return; >>> } >>> >>> @@ -650,21 +650,11 @@ get_plan_nodes_insert(PlannerInfo *root, >>> RemoteQuery *step) >>> } >>> >>> constant = (Const *) checkexpr; >>> - >>> - if (constant->consttype == INT4OID || >>> - constant->consttype == INT2OID || >>> - constant->consttype == INT8OID) >>> - { >>> - part_value = (long) constant->constvalue; >>> - part_value_ptr = &part_value; >>> - } >>> - /* PGXCTODO - handle other data types */ >>> } >>> } >>> >>> /* single call handles both replicated and partitioned types */ >>> - step->exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr, >>> - >>> RELATION_ACCESS_INSERT); >>> + step->exec_nodes = GetRelationNodes(rel_loc_info, >>> constant->constvalue, constant->consttype, RELATION_ACCESS_INSERT); >>> >>> if (eval_expr) >>> pfree(eval_expr); >>> @@ -1047,6 +1037,28 @@ examine_conditions_walker(Node *expr_node, >>> XCWalkerContext *context) >>> { >>> Expr *arg1 = linitial(opexpr->args); >>> Expr *arg2 = lsecond(opexpr->args); >>> + RelabelType *rt; >>> + Expr *targ; >>> + >>> + if (IsA(arg1, RelabelType)) >>> + { >>> + rt = arg1; >>> + arg1 = rt->arg; >>> + } >>> + >>> + if (IsA(arg2, RelabelType)) >>> + { >>> + rt = arg2; >>> + arg2 = rt->arg; >>> + } >>> + >>> + /* Handle constant = var */ >>> + if (IsA(arg2, Var)) >>> + { >>> + targ = arg1; >>> + arg1 = arg2; >>> + arg2 = targ; >>> + } >>> >>> /* Look for a table */ >>> if (IsA(arg1, Var)) >>> @@ -1134,7 +1146,8 @@ examine_conditions_walker(Node *expr_node, >>> XCWalkerContext *context) >>> lit_comp->relid = >>> column_base->relid; >>> lit_comp->rel_loc_info = >>> rel_loc_info1; >>> lit_comp->col_name = >>> column_base->colname; >>> - lit_comp->constant = >>> constant->constvalue; >>> + lit_comp->constValue = >>> constant->constvalue; >>> + lit_comp->constType = >>> constant->consttype; >>> >>> >>> context->conditions->partitioned_literal_comps = lappend( >>> >>> context->conditions->partitioned_literal_comps, >>> @@ -1742,9 +1755,7 @@ get_plan_nodes_walker(Node *query_node, >>> XCWalkerContext *context) >>> if (rel_loc_info->locatorType != LOCATOR_TYPE_HASH && >>> rel_loc_info->locatorType != LOCATOR_TYPE_MODULO) >>> /* do not need to determine partitioning >>> expression */ >>> - context->query_step->exec_nodes = >>> GetRelationNodes(rel_loc_info, >>> - >>> NULL, >>> - >>> context->accessType); >>> + context->query_step->exec_nodes = >>> GetRelationNodes(rel_loc_info, 0, UNKNOWNOID, context->accessType); >>> >>> /* Note replicated table usage for determining safe >>> queries */ >>> if (context->query_step->exec_nodes) >>> @@ -1800,9 +1811,7 @@ get_plan_nodes_walker(Node *query_node, >>> XCWalkerContext *context) >>> { >>> Literal_Comparison *lit_comp = (Literal_Comparison >>> *) lfirst(lc); >>> >>> - test_exec_nodes = GetRelationNodes( >>> - lit_comp->rel_loc_info, >>> &(lit_comp->constant), >>> - RELATION_ACCESS_READ); >>> + test_exec_nodes = >>> GetRelationNodes(lit_comp->rel_loc_info, lit_comp->constValue, >>> lit_comp->constType, RELATION_ACCESS_READ); >>> >>> test_exec_nodes->tableusagetype = >>> table_usage_type; >>> if (context->query_step->exec_nodes == NULL) >>> @@ -1828,9 +1837,7 @@ get_plan_nodes_walker(Node *query_node, >>> XCWalkerContext *context) >>> parent_child = (Parent_Child_Join *) >>> >>> 
linitial(context->conditions->partitioned_parent_child); >>> >>> - context->query_step->exec_nodes = >>> GetRelationNodes(parent_child->rel_loc_info1, >>> - >>> NULL, >>> - >>> context->accessType); >>> + context->query_step->exec_nodes = >>> GetRelationNodes(parent_child->rel_loc_info1, 0, UNKNOWNOID, >>> context->accessType); >>> context->query_step->exec_nodes->tableusagetype = >>> table_usage_type; >>> } >>> >>> @@ -3378,8 +3385,6 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, >>> ExecNodes **exec_nodes, const Ex >>> Expr *checkexpr; >>> Expr *eval_expr = NULL; >>> Const *constant; >>> - long part_value; >>> - long *part_value_ptr = NULL; >>> >>> eval_expr = (Expr *) eval_const_expressions(NULL, (Node *)expr); >>> checkexpr = get_numeric_constant(eval_expr); >>> @@ -3389,17 +3394,8 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, >>> ExecNodes **exec_nodes, const Ex >>> >>> constant = (Const *) checkexpr; >>> >>> - if (constant->consttype == INT4OID || >>> - constant->consttype == INT2OID || >>> - constant->consttype == INT8OID) >>> - { >>> - part_value = (long) constant->constvalue; >>> - part_value_ptr = &part_value; >>> - } >>> - >>> /* single call handles both replicated and partitioned types */ >>> - *exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr, >>> - >>> RELATION_ACCESS_INSERT); >>> + *exec_nodes = GetRelationNodes(rel_loc_info, >>> constant->constvalue, constant->consttype, RELATION_ACCESS_INSERT); >>> if (eval_expr) >>> pfree(eval_expr); >>> >>> diff --git a/src/backend/pgxc/pool/execRemote.c >>> b/src/backend/pgxc/pool/execRemote.c >>> index 75aca21..76e3eef 100644 >>> --- a/src/backend/pgxc/pool/execRemote.c >>> +++ b/src/backend/pgxc/pool/execRemote.c >>> @@ -1061,7 +1061,8 @@ BufferConnection(PGXCNodeHandle *conn) >>> RemoteQueryState *combiner = conn->combiner; >>> MemoryContext oldcontext; >>> >>> - Assert(conn->state == DN_CONNECTION_STATE_QUERY && combiner); >>> + if (combiner == NULL || conn->state != DN_CONNECTION_STATE_QUERY) >>> + return; >>> >>> /* >>> * When BufferConnection is invoked CurrentContext is related to >>> other >>> @@ -3076,9 +3077,8 @@ get_exec_connections(RemoteQueryState *planstate, >>> if (!isnull) >>> { >>> RelationLocInfo *rel_loc_info = >>> GetRelationLocInfo(exec_nodes->relid); >>> - ExecNodes *nodes = >>> GetRelationNodes(rel_loc_info, >>> - >>> (long *) &partvalue, >>> - >>> exec_nodes->accesstype); >>> + /* PGXCTODO what is the type of >>> partvalue here*/ >>> + ExecNodes *nodes = >>> GetRelationNodes(rel_loc_info, partvalue, UNKNOWNOID, >>> exec_nodes->accesstype); >>> if (nodes) >>> { >>> nodelist = >>> nodes->nodelist; >>> diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c >>> index 415fc47..6d7939b 100644 >>> --- a/src/backend/tcop/postgres.c >>> +++ b/src/backend/tcop/postgres.c >>> @@ -670,18 +670,18 @@ pg_analyze_and_rewrite(Node *parsetree, const char >>> *query_string, >>> querytree_list = pg_rewrite_query(query); >>> >>> #ifdef PGXC >>> - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) >>> - { >>> - ListCell *lc; >>> - >>> - foreach(lc, querytree_list) >>> - { >>> - Query *query = (Query *) lfirst(lc); >>> - >>> - if (query->sql_statement == NULL) >>> - query->sql_statement = pstrdup(query_string); >>> - } >>> - } >>> + if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) >>> + { >>> + ListCell *lc; >>> + >>> + foreach(lc, querytree_list) >>> + { >>> + Query *query = (Query *) lfirst(lc); >>> + >>> + if (query->sql_statement == NULL) >>> + query->sql_statement = >>> pstrdup(query_string); >>> + } 
>>> + } >>> #endif >>> >>> TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string); >>> @@ -1043,7 +1043,7 @@ exec_simple_query(const char *query_string) >>> >>> querytree_list = pg_analyze_and_rewrite(parsetree, >>> query_string, >>> >>> NULL, 0); >>> - >>> + >>> plantree_list = pg_plan_queries(querytree_list, 0, NULL); >>> >>> /* Done with the snapshot used for parsing/planning */ >>> diff --git a/src/include/access/hash.h b/src/include/access/hash.h >>> index d5899f4..4aaffaa 100644 >>> --- a/src/include/access/hash.h >>> +++ b/src/include/access/hash.h >>> @@ -353,4 +353,8 @@ extern OffsetNumber _hash_binsearch_last(Page page, >>> uint32 hash_value); >>> extern void hash_redo(XLogRecPtr lsn, XLogRecord *record); >>> extern void hash_desc(StringInfo buf, uint8 xl_info, char *rec); >>> >>> +#ifdef PGXC >>> +extern Datum compute_hash(Oid type, Datum value, int *pErr); >>> +#endif >>> + >>> #endif /* HASH_H */ >>> diff --git a/src/include/pgxc/locator.h b/src/include/pgxc/locator.h >>> index 9f669d9..9ee983c 100644 >>> --- a/src/include/pgxc/locator.h >>> +++ b/src/include/pgxc/locator.h >>> @@ -100,8 +100,7 @@ extern char ConvertToLocatorType(int disttype); >>> extern char *GetRelationHashColumn(RelationLocInfo *rel_loc_info); >>> extern RelationLocInfo *GetRelationLocInfo(Oid relid); >>> extern RelationLocInfo *CopyRelationLocInfo(RelationLocInfo *src_info); >>> -extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, long >>> *partValue, >>> - RelationAccessType accessType); >>> +extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, Datum >>> valueForDistCol, Oid typeOfValueForDistCol, RelationAccessType accessType); >>> extern bool IsHashColumn(RelationLocInfo *rel_loc_info, char >>> *part_col_name); >>> extern bool IsHashColumnForRelId(Oid relid, char *part_col_name); >>> extern int GetRoundRobinNode(Oid relid); >>> diff --git a/src/test/regress/expected/create_index_1.out >>> b/src/test/regress/expected/create_index_1.out >>> index 52fdc91..ab3807c 100644 >>> --- a/src/test/regress/expected/create_index_1.out >>> +++ b/src/test/regress/expected/create_index_1.out >>> @@ -174,15 +174,10 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 ~= >>> '(-5, -12)'; >>> SET enable_seqscan = OFF; >>> SET enable_indexscan = ON; >>> SET enable_bitmapscan = ON; >>> -EXPLAIN (COSTS OFF) >>> -SELECT * FROM fast_emp4000 >>> - WHERE home_base @ '(200,200),(2000,1000)'::box >>> - ORDER BY (home_base[0])[0]; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> >>> -(1 row) >>> - >>> +--EXPLAIN (COSTS OFF) >>> +--SELECT * FROM fast_emp4000 >>> +-- WHERE home_base @ '(200,200),(2000,1000)'::box >>> +-- ORDER BY (home_base[0])[0]; >>> SELECT * FROM fast_emp4000 >>> WHERE home_base @ '(200,200),(2000,1000)'::box >>> ORDER BY (home_base[0])[0]; >>> @@ -190,40 +185,25 @@ SELECT * FROM fast_emp4000 >>> ----------- >>> (0 rows) >>> >>> -EXPLAIN (COSTS OFF) >>> -SELECT count(*) FROM fast_emp4000 WHERE home_base && >>> '(1000,1000,0,0)'::box; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> >>> -(1 row) >>> - >>> +--EXPLAIN (COSTS OFF) >>> +--SELECT count(*) FROM fast_emp4000 WHERE home_base && >>> '(1000,1000,0,0)'::box; >>> SELECT count(*) FROM fast_emp4000 WHERE home_base && >>> '(1000,1000,0,0)'::box; >>> count >>> ------- >>> 1 >>> (1 row) >>> >>> -EXPLAIN (COSTS OFF) >>> -SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> >>> -(1 row) >>> - >>> +--EXPLAIN (COSTS OFF) >>> +--SELECT count(*) FROM 
fast_emp4000 WHERE home_base IS NULL; >>> SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; >>> count >>> ------- >>> 138 >>> (1 row) >>> >>> -EXPLAIN (COSTS OFF) >>> -SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon >>> - ORDER BY (poly_center(f1))[0]; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> >>> -(1 row) >>> - >>> +--EXPLAIN (COSTS OFF) >>> +--SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon >>> +-- ORDER BY (poly_center(f1))[0]; >>> SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon >>> ORDER BY (poly_center(f1))[0]; >>> id | f1 >>> @@ -231,14 +211,9 @@ SELECT * FROM polygon_tbl WHERE f1 ~ >>> '((1,1),(2,2),(2,1))'::polygon >>> 1 | ((2,0),(2,4),(0,0)) >>> (1 row) >>> >>> -EXPLAIN (COSTS OFF) >>> -SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) >>> - ORDER BY area(f1); >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> >>> -(1 row) >>> - >>> +--EXPLAIN (COSTS OFF) >>> +--SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) >>> +-- ORDER BY area(f1); >>> SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) >>> ORDER BY area(f1); >>> f1 >>> @@ -269,9 +244,9 @@ LINE 1: SELECT count(*) FROM gcircle_tbl WHERE f1 && >>> '<(500,500),500... >>> ^ >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; >>> @@ -282,9 +257,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ box >>> '(0,0,100,100)'; >>> >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; >>> @@ -295,9 +270,9 @@ SELECT count(*) FROM point_tbl WHERE box >>> '(0,0,100,100)' @> f1; >>> >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl WHERE f1 <@ polygon >>> '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl WHERE f1 <@ polygon >>> '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; >>> @@ -308,9 +283,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ polygon >>> '(0,0),(0,100),(100,100),(50, >>> >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; >>> @@ -321,9 +296,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ circle >>> '<(50,50),50>'; >>> >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; >>> @@ -334,9 +309,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, >>> 0.0)'; >>> >>> EXPLAIN (COSTS OFF) 
>>> SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; >>> @@ -347,9 +322,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, >>> 0.0)'; >>> >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; >>> @@ -360,9 +335,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, >>> 0.0)'; >>> >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; >>> @@ -373,9 +348,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, >>> 0.0)'; >>> >>> EXPLAIN (COSTS OFF) >>> SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; >>> - QUERY PLAN >>> ----------------- >>> - Data Node Scan >>> + QUERY PLAN >>> >>> +--------------------------------- >>> + Data Node Scan (Node Count [1]) >>> (1 row) >>> >>> SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; >>> @@ -774,7 +749,7 @@ CREATE INDEX hash_f8_index ON hash_f8_heap USING hash >>> (random float8_ops); >>> -- >>> CREATE TABLE func_index_heap (f1 text, f2 text); >>> CREATE UNIQUE INDEX func_index_index on func_index_heap >>> (textcat(f1,f2)); >>> -ERROR: Cannot locally enforce a unique index on round robin distributed >>> table. >>> +ERROR: Unique index of partitioned table must contain the hash/modulo >>> distribution column. >>> INSERT INTO func_index_heap VALUES('ABC','DEF'); >>> INSERT INTO func_index_heap VALUES('AB','CDEFG'); >>> INSERT INTO func_index_heap VALUES('QWE','RTY'); >>> @@ -788,7 +763,7 @@ INSERT INTO func_index_heap VALUES('QWERTY'); >>> DROP TABLE func_index_heap; >>> CREATE TABLE func_index_heap (f1 text, f2 text); >>> CREATE UNIQUE INDEX func_index_index on func_index_heap ((f1 || f2) >>> text_ops); >>> -ERROR: Cannot locally enforce a unique index on round robin distributed >>> table. >>> +ERROR: Unique index of partitioned table must contain the hash/modulo >>> distribution column. 
>>> INSERT INTO func_index_heap VALUES('ABC','DEF'); >>> INSERT INTO func_index_heap VALUES('AB','CDEFG'); >>> INSERT INTO func_index_heap VALUES('QWE','RTY'); >>> diff --git a/src/test/regress/expected/float4_1.out >>> b/src/test/regress/expected/float4_1.out >>> index 432d159..f50147d 100644 >>> --- a/src/test/regress/expected/float4_1.out >>> +++ b/src/test/regress/expected/float4_1.out >>> @@ -125,16 +125,6 @@ SELECT 'nan'::numeric::float4; >>> NaN >>> (1 row) >>> >>> -SELECT '' AS five, * FROM FLOAT4_TBL; >>> - five | f1 >>> -------+------------- >>> - | 1004.3 >>> - | 1.23457e+20 >>> - | 0 >>> - | -34.84 >>> - | 1.23457e-20 >>> -(5 rows) >>> - >>> SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; >>> five | f1 >>> ------+------------- >>> @@ -257,13 +247,14 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM >>> FLOAT4_TBL f ORDER BY f1; >>> UPDATE FLOAT4_TBL >>> SET f1 = FLOAT4_TBL.f1 * '-1' >>> WHERE FLOAT4_TBL.f1 > '0.0'; >>> +ERROR: Partition column can't be updated in current version >>> SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; >>> - five | f1 >>> -------+-------------- >>> - | -1.23457e+20 >>> - | -1004.3 >>> - | -34.84 >>> - | -1.23457e-20 >>> - | 0 >>> + five | f1 >>> +------+------------- >>> + | -34.84 >>> + | 0 >>> + | 1.23457e-20 >>> + | 1004.3 >>> + | 1.23457e+20 >>> (5 rows) >>> >>> diff --git a/src/test/regress/expected/float8_1.out >>> b/src/test/regress/expected/float8_1.out >>> index 65fe187..8ce7930 100644 >>> --- a/src/test/regress/expected/float8_1.out >>> +++ b/src/test/regress/expected/float8_1.out >>> @@ -381,6 +381,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; >>> UPDATE FLOAT8_TBL >>> SET f1 = FLOAT8_TBL.f1 * '-1' >>> WHERE FLOAT8_TBL.f1 > '0.0'; >>> +ERROR: Partition column can't be updated in current version >>> SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1; >>> ERROR: value out of range: overflow >>> SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1; >>> @@ -396,17 +397,17 @@ ERROR: cannot take logarithm of zero >>> SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0'; >>> ERROR: cannot take logarithm of a negative number >>> SELECT '' AS bad, exp(f.f1) from FLOAT8_TBL f ORDER BY f1; >>> -ERROR: value out of range: underflow >>> +ERROR: value out of range: overflow >>> SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f; >>> ERROR: division by zero >>> SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; >>> - five | f1 >>> -------+----------------------- >>> - | -1.2345678901234e+200 >>> - | -1004.3 >>> - | -34.84 >>> - | -1.2345678901234e-200 >>> - | 0 >>> + five | f1 >>> +------+---------------------- >>> + | -34.84 >>> + | 0 >>> + | 1.2345678901234e-200 >>> + | 1004.3 >>> + | 1.2345678901234e+200 >>> (5 rows) >>> >>> -- test for over- and underflow >>> diff --git a/src/test/regress/expected/foreign_key_1.out >>> b/src/test/regress/expected/foreign_key_1.out >>> index 7eccdc6..3cb7d17 100644 >>> --- a/src/test/regress/expected/foreign_key_1.out >>> +++ b/src/test/regress/expected/foreign_key_1.out >>> @@ -773,9 +773,9 @@ INSERT INTO FKTABLE VALUES(43); -- should >>> fail >>> ERROR: insert or update on table "fktable" violates foreign key >>> constraint "fktable_ftest1_fkey" >>> DETAIL: Key (ftest1)=(43) is not present in table "pktable". 
>>> UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed >>> +ERROR: Partition column can't be updated in current version >>> UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail >>> -ERROR: insert or update on table "fktable" violates foreign key >>> constraint "fktable_ftest1_fkey" >>> -DETAIL: Key (ftest1)=(43) is not present in table "pktable". >>> +ERROR: Partition column can't be updated in current version >>> DROP TABLE FKTABLE; >>> -- This should fail, because we'd have to cast numeric to int which is >>> -- not an implicit coercion (or use numeric=numeric, but that's not part >>> @@ -787,34 +787,22 @@ DROP TABLE PKTABLE; >>> -- On the other hand, this should work because int implicitly promotes >>> to >>> -- numeric, and we allow promotion on the FK side >>> CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY); >>> -ERROR: Column ptest1 is not a hash distributable data type >>> +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index >>> "pktable_pkey" for table "pktable" >>> INSERT INTO PKTABLE VALUES(42); >>> -ERROR: relation "pktable" does not exist >>> -LINE 1: INSERT INTO PKTABLE VALUES(42); >>> - ^ >>> CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable); >>> -ERROR: relation "pktable" does not exist >>> -- Check it actually works >>> INSERT INTO FKTABLE VALUES(42); -- should succeed >>> -ERROR: relation "fktable" does not exist >>> -LINE 1: INSERT INTO FKTABLE VALUES(42); >>> - ^ >>> +ERROR: insert or update on table "fktable" violates foreign key >>> constraint "fktable_ftest1_fkey" >>> +DETAIL: Key (ftest1)=(42) is not present in table "pktable". >>> INSERT INTO FKTABLE VALUES(43); -- should fail >>> -ERROR: relation "fktable" does not exist >>> -LINE 1: INSERT INTO FKTABLE VALUES(43); >>> - ^ >>> +ERROR: insert or update on table "fktable" violates foreign key >>> constraint "fktable_ftest1_fkey" >>> +DETAIL: Key (ftest1)=(43) is not present in table "pktable". >>> UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed >>> -ERROR: relation "fktable" does not exist >>> -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1; >>> - ^ >>> +ERROR: Partition column can't be updated in current version >>> UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail >>> -ERROR: relation "fktable" does not exist >>> -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1 + 1; >>> - ^ >>> +ERROR: Partition column can't be updated in current version >>> DROP TABLE FKTABLE; >>> -ERROR: table "fktable" does not exist >>> DROP TABLE PKTABLE; >>> -ERROR: table "pktable" does not exist >>> -- Two columns, two tables >>> CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, >>> ptest2)); >>> NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index >>> "pktable_pkey" for table "pktable" >>> diff --git a/src/test/regress/expected/money_1.out >>> b/src/test/regress/expected/money_1.out >>> new file mode 100644 >>> index 0000000..6a15792 >>> --- /dev/null >>> +++ b/src/test/regress/expected/money_1.out >>> @@ -0,0 +1,186 @@ >>> +-- >>> +-- MONEY >>> +-- >>> +CREATE TABLE money_data (m money); >>> +INSERT INTO money_data VALUES ('123'); >>> +SELECT * FROM money_data; >>> + m >>> +--------- >>> + $123.00 >>> +(1 row) >>> + >>> +SELECT m + '123' FROM money_data; >>> + ?column? >>> +---------- >>> + $246.00 >>> +(1 row) >>> + >>> +SELECT m + '123.45' FROM money_data; >>> + ?column? >>> +---------- >>> + $246.45 >>> +(1 row) >>> + >>> +SELECT m - '123.45' FROM money_data; >>> + ?column? >>> +---------- >>> + -$0.45 >>> +(1 row) >>> + >>> +SELECT m * 2 FROM money_data; >>> + ?column? 
>>> +---------- >>> + $246.00 >>> +(1 row) >>> + >>> +SELECT m / 2 FROM money_data; >>> + ?column? >>> +---------- >>> + $61.50 >>> +(1 row) >>> + >>> +-- All true >>> +SELECT m = '$123.00' FROM money_data; >>> + ?column? >>> +---------- >>> + t >>> +(1 row) >>> + >>> +SELECT m != '$124.00' FROM money_data; >>> + ?column? >>> +---------- >>> + t >>> +(1 row) >>> + >>> +SELECT m <= '$123.00' FROM money_data; >>> + ?column? >>> +---------- >>> + t >>> +(1 row) >>> + >>> +SELECT m >= '$123.00' FROM money_data; >>> + ?column? >>> +---------- >>> + t >>> +(1 row) >>> + >>> +SELECT m < '$124.00' FROM money_data; >>> + ?column? >>> +---------- >>> + t >>> +(1 row) >>> + >>> +SELECT m > '$122.00' FROM money_data; >>> + ?column? >>> +---------- >>> + t >>> +(1 row) >>> + >>> +-- All false >>> +SELECT m = '$123.01' FROM money_data; >>> + ?column? >>> +---------- >>> +(0 rows) >>> + >>> +SELECT m != '$123.00' FROM money_data; >>> + ?column? >>> +---------- >>> + f >>> +(1 row) >>> + >>> +SELECT m <= '$122.99' FROM money_data; >>> + ?column? >>> +---------- >>> + f >>> +(1 row) >>> + >>> +SELECT m >= '$123.01' FROM money_data; >>> + ?column? >>> +---------- >>> + f >>> +(1 row) >>> + >>> +SELECT m > '$124.00' FROM money_data; >>> + ?column? >>> +---------- >>> + f >>> +(1 row) >>> + >>> +SELECT m < '$122.00' FROM money_data; >>> + ?column? >>> +---------- >>> + f >>> +(1 row) >>> + >>> +SELECT cashlarger(m, '$124.00') FROM money_data; >>> + cashlarger >>> +------------ >>> + $124.00 >>> +(1 row) >>> + >>> +SELECT cashsmaller(m, '$124.00') FROM money_data; >>> + cashsmaller >>> +------------- >>> + $123.00 >>> +(1 row) >>> + >>> +SELECT cash_words(m) FROM money_data; >>> + cash_words >>> +------------------------------------------------- >>> + One hundred twenty three dollars and zero cents >>> +(1 row) >>> + >>> +SELECT cash_words(m + '1.23') FROM money_data; >>> + cash_words >>> +-------------------------------------------------------- >>> + One hundred twenty four dollars and twenty three cents >>> +(1 row) >>> + >>> +DELETE FROM money_data; >>> +INSERT INTO money_data VALUES ('$123.45'); >>> +SELECT * FROM money_data; >>> + m >>> +--------- >>> + $123.45 >>> +(1 row) >>> + >>> +DELETE FROM money_data; >>> +INSERT INTO money_data VALUES ('$123.451'); >>> +SELECT * FROM money_data; >>> + m >>> +--------- >>> + $123.45 >>> +(1 row) >>> + >>> +DELETE FROM money_data; >>> +INSERT INTO money_data VALUES ('$123.454'); >>> +SELECT * FROM money_data; >>> + m >>> +--------- >>> + $123.45 >>> +(1 row) >>> + >>> +DELETE FROM money_data; >>> +INSERT INTO money_data VALUES ('$123.455'); >>> +SELECT * FROM money_data; >>> + m >>> +--------- >>> + $123.46 >>> +(1 row) >>> + >>> +DELETE FROM money_data; >>> +INSERT INTO money_data VALUES ('$123.456'); >>> +SELECT * FROM money_data; >>> + m >>> +--------- >>> + $123.46 >>> +(1 row) >>> + >>> +DELETE FROM money_data; >>> +INSERT INTO money_data VALUES ('$123.459'); >>> +SELECT * FROM money_data; >>> + m >>> +--------- >>> + $123.46 >>> +(1 row) >>> + >>> diff --git a/src/test/regress/expected/prepared_xacts_2.out >>> b/src/test/regress/expected/prepared_xacts_2.out >>> index e456200..307ffad 100644 >>> --- a/src/test/regress/expected/prepared_xacts_2.out >>> +++ b/src/test/regress/expected/prepared_xacts_2.out >>> @@ -6,7 +6,7 @@ >>> -- isn't really needed ... stopping and starting the postmaster would >>> -- be enough, but we can't even do that here. 
>>> -- create a simple table that we'll use in the tests >>> -CREATE TABLE pxtest1 (foobar VARCHAR(10)); >>> +CREATE TABLE pxtest1 (foobar VARCHAR(10)) distribute by replication; >>> INSERT INTO pxtest1 VALUES ('aaa'); >>> -- Test PREPARE TRANSACTION >>> BEGIN; >>> diff --git a/src/test/regress/expected/reltime_1.out >>> b/src/test/regress/expected/reltime_1.out >>> new file mode 100644 >>> index 0000000..83f61f9 >>> --- /dev/null >>> +++ b/src/test/regress/expected/reltime_1.out >>> @@ -0,0 +1,109 @@ >>> +-- >>> +-- RELTIME >>> +-- >>> +CREATE TABLE RELTIME_TBL (f1 reltime); >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 1 minute'); >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 5 hour'); >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 10 day'); >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 34 year'); >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 3 months'); >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 14 seconds ago'); >>> +-- badly formatted reltimes >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltime'); >>> +ERROR: invalid input syntax for type reltime: "badly formatted reltime" >>> +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltim... >>> + ^ >>> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); >>> +ERROR: invalid input syntax for type reltime: "@ 30 eons ago" >>> +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); >>> + ^ >>> +-- test reltime operators >>> +SELECT '' AS six, * FROM RELTIME_TBL ORDER BY f1; >>> + six | f1 >>> +-----+--------------- >>> + | @ 14 secs ago >>> + | @ 1 min >>> + | @ 5 hours >>> + | @ 10 days >>> + | @ 3 mons >>> + | @ 34 years >>> +(6 rows) >>> + >>> +SELECT '' AS five, * FROM RELTIME_TBL >>> + WHERE RELTIME_TBL.f1 <> reltime '@ 10 days' ORDER BY f1; >>> + five | f1 >>> +------+--------------- >>> + | @ 14 secs ago >>> + | @ 1 min >>> + | @ 5 hours >>> + | @ 3 mons >>> + | @ 34 years >>> +(5 rows) >>> + >>> +SELECT '' AS three, * FROM RELTIME_TBL >>> + WHERE RELTIME_TBL.f1 <= reltime '@ 5 hours' ORDER BY f1; >>> + three | f1 >>> +-------+--------------- >>> + | @ 14 secs ago >>> + | @ 1 min >>> + | @ 5 hours >>> +(3 rows) >>> + >>> +SELECT '' AS three, * FROM RELTIME_TBL >>> + WHERE RELTIME_TBL.f1 < reltime '@ 1 day' ORDER BY f1; >>> + three | f1 >>> +-------+--------------- >>> + | @ 14 secs ago >>> + | @ 1 min >>> + | @ 5 hours >>> +(3 rows) >>> + >>> +SELECT '' AS one, * FROM RELTIME_TBL >>> + WHERE RELTIME_TBL.f1 = reltime '@ 34 years' ORDER BY f1; >>> + one | f1 >>> +-----+---------- >>> + | 34 years >>> +(1 row) >>> + >>> +SELECT '' AS two, * FROM RELTIME_TBL >>> + WHERE RELTIME_TBL.f1 >= reltime '@ 1 month' ORDER BY f1; >>> + two | f1 >>> +-----+------------ >>> + | @ 3 mons >>> + | @ 34 years >>> +(2 rows) >>> + >>> +SELECT '' AS five, * FROM RELTIME_TBL >>> + WHERE RELTIME_TBL.f1 > reltime '@ 3 seconds ago' ORDER BY f1; >>> + five | f1 >>> +------+------------ >>> + | @ 1 min >>> + | @ 5 hours >>> + | @ 10 days >>> + | @ 3 mons >>> + | @ 34 years >>> +(5 rows) >>> + >>> +SELECT '' AS fifteen, r1.*, r2.* >>> + FROM RELTIME_TBL r1, RELTIME_TBL r2 >>> + WHERE r1.f1 > r2.f1 >>> + ORDER BY r1.f1, r2.f1; >>> + fifteen | f1 | f1 >>> +---------+------------+--------------- >>> + | @ 1 min | @ 14 secs ago >>> + | @ 5 hours | @ 14 secs ago >>> + | @ 5 hours | @ 1 min >>> + | @ 10 days | @ 14 secs ago >>> + | @ 10 days | @ 1 min >>> + | @ 10 days | @ 5 hours >>> + | @ 3 mons | @ 14 secs ago >>> + | @ 3 mons | @ 1 min >>> + | @ 3 mons | @ 5 hours >>> + | @ 3 mons | @ 10 days >>> + | @ 34 years | @ 14 secs ago >>> + 
| @ 34 years | @ 1 min >>> + | @ 34 years | @ 5 hours >>> + | @ 34 years | @ 10 days >>> + | @ 34 years | @ 3 mons >>> +(15 rows) >>> + >>> diff --git a/src/test/regress/expected/triggers_1.out >>> b/src/test/regress/expected/triggers_1.out >>> index 5528c66..a9f83ec 100644 >>> --- a/src/test/regress/expected/triggers_1.out >>> +++ b/src/test/regress/expected/triggers_1.out >>> @@ -717,30 +717,30 @@ ERROR: Postgres-XC does not support TRIGGER yet >>> DETAIL: The feature is not currently supported >>> \set QUIET false >>> UPDATE min_updates_test SET f1 = f1; >>> -UPDATE 2 >>> -UPDATE min_updates_test SET f2 = f2 + 1; >>> ERROR: Partition column can't be updated in current version >>> +UPDATE min_updates_test SET f2 = f2 + 1; >>> +UPDATE 2 >>> UPDATE min_updates_test SET f3 = 2 WHERE f3 is null; >>> UPDATE 1 >>> UPDATE min_updates_test_oids SET f1 = f1; >>> -UPDATE 2 >>> -UPDATE min_updates_test_oids SET f2 = f2 + 1; >>> ERROR: Partition column can't be updated in current version >>> +UPDATE min_updates_test_oids SET f2 = f2 + 1; >>> +UPDATE 2 >>> UPDATE min_updates_test_oids SET f3 = 2 WHERE f3 is null; >>> UPDATE 1 >>> \set QUIET true >>> SELECT * FROM min_updates_test ORDER BY 1,2,3; >>> f1 | f2 | f3 >>> ----+----+---- >>> - a | 1 | 2 >>> - b | 2 | 2 >>> + a | 2 | 2 >>> + b | 3 | 2 >>> (2 rows) >>> >>> SELECT * FROM min_updates_test_oids ORDER BY 1,2,3; >>> f1 | f2 | f3 >>> ----+----+---- >>> - a | 1 | 2 >>> - b | 2 | 2 >>> + a | 2 | 2 >>> + b | 3 | 2 >>> (2 rows) >>> >>> DROP TABLE min_updates_test; >>> diff --git a/src/test/regress/expected/tsear... [truncated message content] |
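[Editor's note: The skew Andrei describes in the thread above is easy to see outside the server. The following is a minimal standalone C sketch, not Postgres-XC code: NUM_NODES, compute_hash_int4, and node_for are hypothetical stand-ins for the cluster size, the patch's compute_hash() integer fast path (which returns the raw value), and the compute_modulo()/get_node_from_modulo() step. With an all-even key set, modulo distribution then uses only half of the nodes.]

#include <stdio.h>

#define NUM_NODES 4                      /* hypothetical cluster size */

/* stand-in for compute_hash() on INT4OID: the raw value, no bit mixing */
static long compute_hash_int4(long value)
{
    return value;
}

/* stand-in for compute_modulo()/get_node_from_modulo() */
static int node_for(long hash)
{
    return (int) (hash % NUM_NODES);
}

int main(void)
{
    long keys[] = {2, 4, 6, 8, 10, 12, 14, 16};   /* all-even key set */
    int  rows_per_node[NUM_NODES] = {0};
    int  i, n;

    for (i = 0; i < 8; i++)
        rows_per_node[node_for(compute_hash_int4(keys[i]))]++;

    /* nodes 1 and 3 end up with 0 rows: half the cluster is never used */
    for (n = 0; n < NUM_NODES; n++)
        printf("node %d: %d rows\n", n, rows_per_node[n]);
    return 0;
}

[Routing the value through a mixing hash such as hashint4() first would be expected to spread the same keys across all four buckets, which is the trade-off the thread weighs against the convenience of raw-value equality for integer literals.]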
From: Abbas B. <abb...@te...> - 2011-05-25 10:20:47
|
On Wed, May 25, 2011 at 5:54 AM, Koichi Suzuki <ko...@in...> wrote: > Hi, > > The current code utilizes the existing hash-generation mechanism, and I think this > is basically the right thing to do. By using this, we can pick up almost any > column (I'm not sure about geometric types and composite types; I would > like to test) for hash distribution. > > The points are: 1) Is a distribution column stable enough? --- This is the user's > choice, and most float attributes are not stable. 2) Can we reproduce the > same hash value from the same input value? > > Mason's point is 2). It would be better to handle this from a more general > view. Anyway, I think the current implementation is simple and general enough. > We need a separate means to determine whether a specified column is a good choice > as a distribution column. This should apply not only to built-in types > but also to user-defined types, and will need some design and implementation effort. > > At present, we may notify users that it is not recommended and may be > prohibited in the future. > Agreed. > > We can introduce a new catalog table or extend pg_type to describe which types > are allowed as distribution keys. > --- > Koichi > # Geometric types' element values are float, so they're not adequate to use > as distribution keys. > I initially thought about adding geometric types too, but then decided to leave them for later. > > On Tue, 24 May 2011 09:03:29 -0400 > Mason <ma...@us...> wrote: > > On Tue, May 24, 2011 at 8:08 AM, Abbas Butt > > <ga...@us...> wrote: > > > Project "Postgres-XC". > > > > > > The branch, master has been updated > > > via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) > > > from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) > > > > > > > > > - Log ----------------------------------------------------------------- > > > commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae > > > Author: Abbas <abb...@en...> > > > Date: Tue May 24 17:06:30 2011 +0500 > > > > > > This patch adds support for the following data types to be used as distribution key > > > > > > INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR > > > CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR > > > FLOAT4, FLOAT8, NUMERIC, CASH > > > ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, > TIMETZ > > > > > > > I am not sure some of these data types are a good idea to distribute > > on. Float is inexact and seems problematic. > > > > I just did a quick test: > > > > mds=# create table float1 (a float, b float) distribute by hash (a); > > CREATE TABLE > > > > mds=# insert into float1 values (2.0/3, 2); > > INSERT 0 1 > > > > mds=# select * from float1; > > a | b > > -------------------+--- > > 0.666666666666667 | 2 > > (1 row) > > > > Then, I copy and paste the output of a: > > > > mds=# select * from float1 where a = 0.666666666666667; > > a | b > > ---+--- > > (0 rows) > > > > Looking at the plan, it tries to take advantage of partitioning: > > > > mds=# explain select * from float1 where a = 0.666666666666667; > > QUERY PLAN > > ------------------------------------------------------------------- > > Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) > > (1 row) > > > > I think we should remove support for floats as a possible distribution > > type; users may get themselves into trouble. 
> > > > > There may be similar issues with the timestamp data types: > > > > mds=# create table timestamp1 (a timestamp, b int) distribute by hash(a); > > CREATE TABLE > > mds=# insert into timestamp1 values (now(), 1); > > INSERT 0 1 > > mds=# select * from timestamp1; > > a | b > > ----------------------------+--- > > 2011-05-24 08:51:21.597551 | 1 > > (1 row) > > > > mds=# select * from timestamp1 where a = '2011-05-24 08:51:21.597551'; > > a | b > > ---+--- > > (0 rows) > > > > > > As far as BOOL goes, I suppose it may be OK, but of course there are > > only two possible values. I would block it, or at the very least, if > > the user leaves off the distribution clause, I would not consider BOOL > > columns and would look at other columns as better partitioning candidates. > > > > In any event, I am very glad to see the various INT types, CHAR, > > VARCHAR, TEXT, NUMERIC and DATE supported. I am not so sure how useful > > some of the others are. > > > > Thanks, > > > > Mason |
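The failure Mason demonstrates above is the classic float round-trip problem: the server prints a float8 with roughly 15 significant digits, and parsing that printed literal back does not reproduce the original bits, so the copy-pasted constant compares unequal and hashes to a different node. A minimal standalone C sketch of the effect (illustrative only — it mimics the default float8 output precision, not the actual Postgres-XC output code):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        double stored = 2.0 / 3.0;       /* the value kept in the table */
        char   shown[32];
        double parsed;

        /* float8 is displayed with about 15 significant digits by default */
        snprintf(shown, sizeof(shown), "%.15g", stored);
        parsed = strtod(shown, NULL);    /* what the copy-pasted literal becomes */

        printf("displayed literal: %s\n", shown);
        printf("round-trips equal: %s\n", stored == parsed ? "yes" : "no");
        /* prints "no" -- so both '=' and any hash of the value miss the row */
        return 0;
    }

Round-tripping a float8 exactly takes 17 significant digits (what extra_float_digits buys in Postgres), which is exactly why inexact types make fragile distribution keys.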
From: Abbas B. <abb...@te...> - 2011-05-25 10:17:05
|
On Tue, May 24, 2011 at 10:18 PM, Andrei Martsinchyk <and...@gm...> wrote: > Hi Abbas, > > I looked at the code and see that for some data types the compute_hash() > returns not a hash code, but the original value: > > + case INT8OID: > > + /* This gives added advantage that > + * a = 8446744073709551359 > + * and a = 8446744073709551359::int8 both work*/ > + return DatumGetInt32(value); > + case INT2OID: > + return DatumGetInt16(value); > + case OIDOID: > + return DatumGetObjectId(value); > + case INT4OID: > + return DatumGetInt32(value); > + case BOOLOID: > + return DatumGetBool(value); > > That is not a critical error, and it gives a bit better calculation speed, but it may > cause poor distribution when, for example, the distribution column contains > only even or only odd values. > That would happen only if the user chooses to use modulo distribution. If the user knows his/her dist column is not uniformly distributed and wants rows to be distributed uniformly, a better choice would be to use hash distribution, in which case a hash will be computed. > Some nodes may have many rows while others may have no rows at all. I > suggest using the hashintX functions here. > And another point: Oids are generated on data nodes; does it make sense to > allow hashing here, where the value is supposed to be coming from the > coordinator? > Oid can be used like this, e.g. CREATE TABLE abc(a oid) distribute by modulo(a); Oid is the only unsigned integer data type available to the user, and if the user wants a table to have a column of type Oid and wants to distribute by that column, then the provided option will be used. > > 2011/5/24 Abbas Butt <ga...@us...> > >> Project "Postgres-XC". >> >> The branch, master has been updated >> via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) >> from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) >> >> >> - Log ----------------------------------------------------------------- >> commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae >> Author: Abbas <abb...@en...> >> Date: Tue May 24 17:06:30 2011 +0500 >> >> This patch adds support for the following data types to be used as >> distribution key >> >> INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR >> CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR >> FLOAT4, FLOAT8, NUMERIC, CASH >> ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, TIMETZ >> >> A new function compute_hash is added in the system which is used to >> compute the hash of any of the supported data types. >> The computed hash is used in the function GetRelationNodes to >> find the targeted data node. >> >> EXPLAIN for RemoteQuery has been modified to show the number of >> data nodes targeted for a certain query. This is essential >> to spot bugs in the optimizer in case it is targeting all nodes >> by mistake. >> >> In case of optimisations where comparison with a constant leads >> the optimiser to point to a single data node, there were a couple >> of mistakes in examine_conditions_walker. >> First, it was not supporting RelabelType, which represents a "dummy" >> type coercion between two binary compatible datatypes. >> This was resulting in the optimization not working for the varchar >> type, for example. >> Secondly, it was not catering for the case where the user specifies the >> condition such that the constant expression is written towards the LHS and >> the >> variable towards the RHS of the = operator, >> i.e. 23 = a >> >> A number of test cases have been added in regression to make sure >> further enhancements do not break this functionality. 
>> >> This change has a sizeable impact on current regression tests in the >> following manner. >> >> 1. horology test case crashes the server and has been commented out in >> serial_schedule. >> 2. In money test case the planner optimizer wrongly kicks in to >> optimize this query >> SELECT m = '$123.01' FROM money_data; >> to point to a single data node. >> 3. There were a few un-necessary EXPLAINs in create_index test case. >> Since we have added support in EXPLAIN to show the number of >> data nodes targeted for RemoteQuery, this test case was producing >> output dependent on the cluster configuration. >> 4. In guc test case >> DROP ROLE temp_reset_user; >> results in >> ERROR: permission denied to drop role >> >> diff --git a/src/backend/access/hash/hashfunc.c >> b/src/backend/access/hash/hashfunc.c >> index 577873b..22766c5 100644 >> --- a/src/backend/access/hash/hashfunc.c >> +++ b/src/backend/access/hash/hashfunc.c >> @@ -28,6 +28,13 @@ >> >> #include "access/hash.h" >> >> +#ifdef PGXC >> +#include "catalog/pg_type.h" >> +#include "utils/builtins.h" >> +#include "utils/timestamp.h" >> +#include "utils/date.h" >> +#include "utils/nabstime.h" >> +#endif >> >> /* Note: this is used for both "char" and boolean datatypes */ >> Datum >> @@ -521,3 +528,91 @@ hash_uint32(uint32 k) >> /* report the result */ >> return UInt32GetDatum(c); >> } >> + >> +#ifdef PGXC >> +/* >> + * compute_hash() -- Generaic hash function for all datatypes >> + * >> + */ >> + >> +Datum >> +compute_hash(Oid type, Datum value, int *pErr) >> +{ >> + Assert(pErr); >> + >> + *pErr = 0; >> + >> + if (value == NULL) >> + { >> + *pErr = 1; >> + return 0; >> + } >> + >> + switch(type) >> + { >> + case INT8OID: >> + /* This gives added advantage that >> + * a = 8446744073709551359 >> + * and a = 8446744073709551359::int8 both work*/ >> + return DatumGetInt32(value); >> + case INT2OID: >> + return DatumGetInt16(value); >> + case OIDOID: >> + return DatumGetObjectId(value); >> + case INT4OID: >> + return DatumGetInt32(value); >> + case BOOLOID: >> + return DatumGetBool(value); >> + >> + case CHAROID: >> + return DirectFunctionCall1(hashchar, value); >> + case NAMEOID: >> + return DirectFunctionCall1(hashname, value); >> + case INT2VECTOROID: >> + return DirectFunctionCall1(hashint2vector, value); >> + >> + case VARCHAROID: >> + case TEXTOID: >> + return DirectFunctionCall1(hashtext, value); >> + >> + case OIDVECTOROID: >> + return DirectFunctionCall1(hashoidvector, value); >> + case FLOAT4OID: >> + return DirectFunctionCall1(hashfloat4, value); >> + case FLOAT8OID: >> + return DirectFunctionCall1(hashfloat8, value); >> + >> + case ABSTIMEOID: >> + return DatumGetAbsoluteTime(value); >> + case RELTIMEOID: >> + return DatumGetRelativeTime(value); >> + case CASHOID: >> + return DirectFunctionCall1(hashint8, value); >> + >> + case BPCHAROID: >> + return DirectFunctionCall1(hashbpchar, value); >> + case BYTEAOID: >> + return DirectFunctionCall1(hashvarlena, value); >> + >> + case DATEOID: >> + return DatumGetDateADT(value); >> + case TIMEOID: >> + return DirectFunctionCall1(time_hash, value); >> + case TIMESTAMPOID: >> + return DirectFunctionCall1(timestamp_hash, value); >> + case TIMESTAMPTZOID: >> + return DirectFunctionCall1(timestamp_hash, value); >> + case INTERVALOID: >> + return DirectFunctionCall1(interval_hash, value); >> + case TIMETZOID: >> + return DirectFunctionCall1(timetz_hash, value); >> + >> + case NUMERICOID: >> + return DirectFunctionCall1(hash_numeric, value); >> + default: >> + *pErr = 1; >> + return 0; >> + 
} >> +} >> + >> +#endif >> diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c >> index 613d5ff..714190f 100644 >> --- a/src/backend/commands/copy.c >> +++ b/src/backend/commands/copy.c >> @@ -1645,14 +1645,14 @@ CopyTo(CopyState cstate) >> } >> >> #ifdef PGXC >> - if (IS_PGXC_COORDINATOR && cstate->rel_loc) >> + if (IS_PGXC_COORDINATOR && cstate->rel_loc) >> { >> cstate->processed = DataNodeCopyOut( >> - GetRelationNodes(cstate->rel_loc, NULL, >> RELATION_ACCESS_READ), >> + GetRelationNodes(cstate->rel_loc, 0, >> UNKNOWNOID, RELATION_ACCESS_READ), >> cstate->connections, >> cstate->copy_file); >> } >> - else >> + else >> { >> #endif >> >> @@ -2417,15 +2417,18 @@ CopyFrom(CopyState cstate) >> #ifdef PGXC >> if (IS_PGXC_COORDINATOR && cstate->rel_loc) >> { >> - Datum *dist_col_value = NULL; >> + Datum dist_col_value; >> + Oid dist_col_type = UNKNOWNOID; >> >> if (cstate->idx_dist_by_col >= 0 && >> !nulls[cstate->idx_dist_by_col]) >> - dist_col_value = >> &values[cstate->idx_dist_by_col]; >> + { >> + dist_col_value = >> values[cstate->idx_dist_by_col]; >> + dist_col_type = >> attr[cstate->idx_dist_by_col]->atttypid; >> + } >> >> if (DataNodeCopyIn(cstate->line_buf.data, >> cstate->line_buf.len, >> - >> GetRelationNodes(cstate->rel_loc, (long *)dist_col_value, >> - >> RELATION_ACCESS_INSERT), >> + >> GetRelationNodes(cstate->rel_loc, dist_col_value, dist_col_type, >> RELATION_ACCESS_INSERT), >> cstate->connections)) >> ereport(ERROR, >> >> (errcode(ERRCODE_CONNECTION_EXCEPTION), >> @@ -4037,7 +4040,8 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot >> *slot) >> HeapTuple tuple; >> Datum *values; >> bool *nulls; >> - Datum *dist_col_value = NULL; >> + Datum dist_col_value; >> + Oid dist_col_type; >> MemoryContext oldcontext; >> CopyState cstate; >> >> @@ -4082,6 +4086,11 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot >> *slot) >> cstate->fe_msgbuf = makeStringInfo(); >> attr = cstate->tupDesc->attrs; >> >> + if (cstate->idx_dist_by_col >= 0) >> + dist_col_type = >> attr[cstate->idx_dist_by_col]->atttypid; >> + else >> + dist_col_type = UNKNOWNOID; >> + >> /* Get info about the columns we need to process. 
*/ >> cstate->out_functions = (FmgrInfo *) >> palloc(cstate->tupDesc->natts * sizeof(FmgrInfo)); >> foreach(lc, cstate->attnumlist) >> @@ -4152,12 +4161,14 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot >> *slot) >> >> /* Get dist column, if any */ >> if (cstate->idx_dist_by_col >= 0 && >> !nulls[cstate->idx_dist_by_col]) >> - dist_col_value = &values[cstate->idx_dist_by_col]; >> + dist_col_value = values[cstate->idx_dist_by_col]; >> + else >> + dist_col_type = UNKNOWNOID; >> >> /* Send item to the appropriate data node(s) (buffer) */ >> if (DataNodeCopyIn(cstate->fe_msgbuf->data, >> cstate->fe_msgbuf->len, >> - GetRelationNodes(cstate->rel_loc, (long >> *)dist_col_value, RELATION_ACCESS_INSERT), >> + GetRelationNodes(cstate->rel_loc, >> dist_col_value, dist_col_type, RELATION_ACCESS_INSERT), >> cstate->connections)) >> ereport(ERROR, >> (errcode(ERRCODE_CONNECTION_EXCEPTION), >> diff --git a/src/backend/commands/explain.c >> b/src/backend/commands/explain.c >> index a361186..fe74569 100644 >> --- a/src/backend/commands/explain.c >> +++ b/src/backend/commands/explain.c >> @@ -851,8 +851,28 @@ ExplainNode(Plan *plan, PlanState *planstate, >> case T_WorkTableScan: >> #ifdef PGXC >> case T_RemoteQuery: >> + { >> + RemoteQuery *remote_query = (RemoteQuery >> *) plan; >> + int pnc, nc; >> + >> + pnc = 0; >> + nc = 0; >> + if (remote_query->exec_nodes != NULL) >> + { >> + if >> (remote_query->exec_nodes->primarynodelist != NULL) >> + { >> + pnc = >> list_length(remote_query->exec_nodes->primarynodelist); >> + appendStringInfo(es->str, >> " (Primary Node Count [%d])", pnc); >> + } >> + if >> (remote_query->exec_nodes->nodelist) >> + { >> + nc = >> list_length(remote_query->exec_nodes->nodelist); >> + appendStringInfo(es->str, >> " (Node Count [%d])", nc); >> + } >> + } >> #endif >> - ExplainScanTarget((Scan *) plan, es); >> + ExplainScanTarget((Scan *) plan, es); >> + } >> break; >> case T_BitmapIndexScan: >> { >> diff --git a/src/backend/optimizer/plan/createplan.c >> b/src/backend/optimizer/plan/createplan.c >> index b6252a3..c03938d 100644 >> --- a/src/backend/optimizer/plan/createplan.c >> +++ b/src/backend/optimizer/plan/createplan.c >> @@ -2418,9 +2418,7 @@ create_remotequery_plan(PlannerInfo *root, Path >> *best_path, >> scan_plan->exec_nodes->baselocatortype = >> rel_loc_info->locatorType; >> else >> scan_plan->exec_nodes->baselocatortype = '\0'; >> - scan_plan->exec_nodes = GetRelationNodes(rel_loc_info, >> - >> NULL, >> - >> RELATION_ACCESS_READ); >> + scan_plan->exec_nodes = GetRelationNodes(rel_loc_info, 0, >> UNKNOWNOID, RELATION_ACCESS_READ); >> copy_path_costsize(&scan_plan->scan.plan, best_path); >> >> /* PGXCTODO - get better estimates */ >> @@ -5024,8 +5022,7 @@ create_remotedelete_plan(PlannerInfo *root, Plan >> *topplan) >> fstep->sql_statement = pstrdup(buf->data); >> fstep->combine_type = COMBINE_TYPE_SAME; >> fstep->read_only = false; >> - fstep->exec_nodes = GetRelationNodes(rel_loc_info, NULL, >> - >> RELATION_ACCESS_UPDATE); >> + fstep->exec_nodes = GetRelationNodes(rel_loc_info, 0, >> UNKNOWNOID, RELATION_ACCESS_UPDATE); >> } >> else >> { >> diff --git a/src/backend/pgxc/locator/locator.c >> b/src/backend/pgxc/locator/locator.c >> index 0ab157d..33fe8ac 100644 >> --- a/src/backend/pgxc/locator/locator.c >> +++ b/src/backend/pgxc/locator/locator.c >> @@ -41,7 +41,7 @@ >> >> #include "catalog/pgxc_class.h" >> #include "catalog/namespace.h" >> - >> +#include "access/hash.h" >> >> /* >> * PGXCTODO For prototype, relations use the same hash mapping table. 
>> @@ -206,7 +206,32 @@ char *pColName; >> bool >> IsHashDistributable(Oid col_type) >> { >> - if (col_type == INT4OID || col_type == INT2OID) >> + if(col_type == INT8OID >> + || col_type == INT2OID >> + || col_type == OIDOID >> + || col_type == INT4OID >> + || col_type == BOOLOID >> + || col_type == CHAROID >> + || col_type == NAMEOID >> + || col_type == INT2VECTOROID >> + || col_type == TEXTOID >> + || col_type == OIDVECTOROID >> + || col_type == FLOAT4OID >> + || col_type == FLOAT8OID >> + || col_type == ABSTIMEOID >> + || col_type == RELTIMEOID >> + || col_type == CASHOID >> + || col_type == BPCHAROID >> + || col_type == BYTEAOID >> + || col_type == VARCHAROID >> + || col_type == DATEOID >> + || col_type == TIMEOID >> + || col_type == TIMESTAMPOID >> + || col_type == TIMESTAMPTZOID >> + || col_type == INTERVALOID >> + || col_type == TIMETZOID >> + || col_type == NUMERICOID >> + ) >> return true; >> >> return false; >> @@ -296,7 +321,32 @@ RelationLocInfo *rel_loc_info; >> bool >> IsModuloDistributable(Oid col_type) >> { >> - if (col_type == INT4OID || col_type == INT2OID) >> + if(col_type == INT8OID >> + || col_type == INT2OID >> + || col_type == OIDOID >> + || col_type == INT4OID >> + || col_type == BOOLOID >> + || col_type == CHAROID >> + || col_type == NAMEOID >> + || col_type == INT2VECTOROID >> + || col_type == TEXTOID >> + || col_type == OIDVECTOROID >> + || col_type == FLOAT4OID >> + || col_type == FLOAT8OID >> + || col_type == ABSTIMEOID >> + || col_type == RELTIMEOID >> + || col_type == CASHOID >> + || col_type == BPCHAROID >> + || col_type == BYTEAOID >> + || col_type == VARCHAROID >> + || col_type == DATEOID >> + || col_type == TIMEOID >> + || col_type == TIMESTAMPOID >> + || col_type == TIMESTAMPTZOID >> + || col_type == INTERVALOID >> + || col_type == TIMETZOID >> + || col_type == NUMERICOID >> + ) >> return true; >> >> return false; >> @@ -409,13 +459,13 @@ GetRoundRobinNode(Oid relid) >> * The returned List is a copy, so it should be freed when finished. 
>> */ >> ExecNodes * >> -GetRelationNodes(RelationLocInfo *rel_loc_info, long *partValue, >> - RelationAccessType accessType) >> +GetRelationNodes(RelationLocInfo *rel_loc_info, Datum valueForDistCol, >> Oid typeOfValueForDistCol, RelationAccessType accessType) >> { >> ListCell *prefItem; >> ListCell *stepItem; >> ExecNodes *exec_nodes; >> - >> + long hashValue; >> + int nError; >> >> if (rel_loc_info == NULL) >> return NULL; >> @@ -480,10 +530,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, long >> *partValue, >> break; >> >> case LOCATOR_TYPE_HASH: >> - >> - if (partValue != NULL) >> + hashValue = compute_hash(typeOfValueForDistCol, >> valueForDistCol, &nError); >> + if (nError == 0) >> /* in prototype, all partitioned tables use >> same map */ >> - exec_nodes->nodelist = lappend_int(NULL, >> get_node_from_hash(hash_range_int(*partValue))); >> + exec_nodes->nodelist = lappend_int(NULL, >> get_node_from_hash(hash_range_int(hashValue))); >> else >> if (accessType == RELATION_ACCESS_INSERT) >> /* Insert NULL to node 1 */ >> @@ -494,9 +544,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, long >> *partValue, >> break; >> >> case LOCATOR_TYPE_MODULO: >> - if (partValue != NULL) >> + hashValue = compute_hash(typeOfValueForDistCol, >> valueForDistCol, &nError); >> + if (nError == 0) >> /* in prototype, all partitioned tables use >> same map */ >> - exec_nodes->nodelist = lappend_int(NULL, >> get_node_from_modulo(compute_modulo(*partValue))); >> + exec_nodes->nodelist = lappend_int(NULL, >> get_node_from_modulo(compute_modulo(hashValue))); >> else >> if (accessType == RELATION_ACCESS_INSERT) >> /* Insert NULL to node 1 */ >> @@ -750,7 +801,6 @@ RelationLocInfo * >> GetRelationLocInfo(Oid relid) >> { >> RelationLocInfo *ret_loc_info = NULL; >> - char *namespace; >> >> Relation rel = relation_open(relid, AccessShareLock); >> >> diff --git a/src/backend/pgxc/plan/planner.c >> b/src/backend/pgxc/plan/planner.c >> index 2448a74..4873f19 100644 >> --- a/src/backend/pgxc/plan/planner.c >> +++ b/src/backend/pgxc/plan/planner.c >> @@ -43,20 +43,23 @@ >> #include "utils/lsyscache.h" >> #include "utils/portal.h" >> #include "utils/syscache.h" >> - >> +#include "utils/numeric.h" >> +#include "access/hash.h" >> +#include "utils/timestamp.h" >> +#include "utils/date.h" >> >> /* >> * Convenient format for literal comparisons >> * >> - * PGXCTODO - make constant type Datum, handle other types >> */ >> typedef struct >> { >> - Oid relid; >> - RelationLocInfo *rel_loc_info; >> - Oid attrnum; >> - char *col_name; >> - long constant; /* assume long PGXCTODO - >> should be Datum */ >> + Oid relid; >> + RelationLocInfo *rel_loc_info; >> + Oid attrnum; >> + char *col_name; >> + Datum constValue; >> + Oid constType; >> } Literal_Comparison; >> >> /* >> @@ -471,15 +474,12 @@ get_base_var(Var *var, XCWalkerContext *context) >> static void >> get_plan_nodes_insert(PlannerInfo *root, RemoteQuery *step) >> { >> - Query *query = root->parse; >> - RangeTblEntry *rte; >> - RelationLocInfo *rel_loc_info; >> - Const *constant; >> - ListCell *lc; >> - long part_value; >> - long *part_value_ptr = NULL; >> - Expr *eval_expr = NULL; >> - >> + Query *query = root->parse; >> + RangeTblEntry *rte; >> + RelationLocInfo *rel_loc_info; >> + Const *constant; >> + ListCell *lc; >> + Expr *eval_expr = NULL; >> >> step->exec_nodes = NULL; >> >> @@ -568,7 +568,7 @@ get_plan_nodes_insert(PlannerInfo *root, RemoteQuery >> *step) >> if (!lc) >> { >> /* Skip rest, handle NULL */ >> - step->exec_nodes = GetRelationNodes(rel_loc_info, >> 
NULL, RELATION_ACCESS_INSERT); >> + step->exec_nodes = GetRelationNodes(rel_loc_info, >> 0, UNKNOWNOID, RELATION_ACCESS_INSERT); >> return; >> } >> >> @@ -650,21 +650,11 @@ get_plan_nodes_insert(PlannerInfo *root, RemoteQuery >> *step) >> } >> >> constant = (Const *) checkexpr; >> - >> - if (constant->consttype == INT4OID || >> - constant->consttype == INT2OID || >> - constant->consttype == INT8OID) >> - { >> - part_value = (long) constant->constvalue; >> - part_value_ptr = &part_value; >> - } >> - /* PGXCTODO - handle other data types */ >> } >> } >> >> /* single call handles both replicated and partitioned types */ >> - step->exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr, >> - >> RELATION_ACCESS_INSERT); >> + step->exec_nodes = GetRelationNodes(rel_loc_info, >> constant->constvalue, constant->consttype, RELATION_ACCESS_INSERT); >> >> if (eval_expr) >> pfree(eval_expr); >> @@ -1047,6 +1037,28 @@ examine_conditions_walker(Node *expr_node, >> XCWalkerContext *context) >> { >> Expr *arg1 = linitial(opexpr->args); >> Expr *arg2 = lsecond(opexpr->args); >> + RelabelType *rt; >> + Expr *targ; >> + >> + if (IsA(arg1, RelabelType)) >> + { >> + rt = arg1; >> + arg1 = rt->arg; >> + } >> + >> + if (IsA(arg2, RelabelType)) >> + { >> + rt = arg2; >> + arg2 = rt->arg; >> + } >> + >> + /* Handle constant = var */ >> + if (IsA(arg2, Var)) >> + { >> + targ = arg1; >> + arg1 = arg2; >> + arg2 = targ; >> + } >> >> /* Look for a table */ >> if (IsA(arg1, Var)) >> @@ -1134,7 +1146,8 @@ examine_conditions_walker(Node *expr_node, >> XCWalkerContext *context) >> lit_comp->relid = >> column_base->relid; >> lit_comp->rel_loc_info = >> rel_loc_info1; >> lit_comp->col_name = >> column_base->colname; >> - lit_comp->constant = >> constant->constvalue; >> + lit_comp->constValue = >> constant->constvalue; >> + lit_comp->constType = >> constant->consttype; >> >> >> context->conditions->partitioned_literal_comps = lappend( >> >> context->conditions->partitioned_literal_comps, >> @@ -1742,9 +1755,7 @@ get_plan_nodes_walker(Node *query_node, >> XCWalkerContext *context) >> if (rel_loc_info->locatorType != LOCATOR_TYPE_HASH && >> rel_loc_info->locatorType != LOCATOR_TYPE_MODULO) >> /* do not need to determine partitioning expression >> */ >> - context->query_step->exec_nodes = >> GetRelationNodes(rel_loc_info, >> - >> NULL, >> - >> context->accessType); >> + context->query_step->exec_nodes = >> GetRelationNodes(rel_loc_info, 0, UNKNOWNOID, context->accessType); >> >> /* Note replicated table usage for determining safe queries >> */ >> if (context->query_step->exec_nodes) >> @@ -1800,9 +1811,7 @@ get_plan_nodes_walker(Node *query_node, >> XCWalkerContext *context) >> { >> Literal_Comparison *lit_comp = (Literal_Comparison >> *) lfirst(lc); >> >> - test_exec_nodes = GetRelationNodes( >> - lit_comp->rel_loc_info, >> &(lit_comp->constant), >> - RELATION_ACCESS_READ); >> + test_exec_nodes = >> GetRelationNodes(lit_comp->rel_loc_info, lit_comp->constValue, >> lit_comp->constType, RELATION_ACCESS_READ); >> >> test_exec_nodes->tableusagetype = table_usage_type; >> if (context->query_step->exec_nodes == NULL) >> @@ -1828,9 +1837,7 @@ get_plan_nodes_walker(Node *query_node, >> XCWalkerContext *context) >> parent_child = (Parent_Child_Join *) >> >> linitial(context->conditions->partitioned_parent_child); >> >> - context->query_step->exec_nodes = >> GetRelationNodes(parent_child->rel_loc_info1, >> - >> NULL, >> - >> context->accessType); >> + context->query_step->exec_nodes = >> GetRelationNodes(parent_child->rel_loc_info1, 
0, UNKNOWNOID, >> context->accessType); >> context->query_step->exec_nodes->tableusagetype = >> table_usage_type; >> } >> >> @@ -3378,8 +3385,6 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, >> ExecNodes **exec_nodes, const Ex >> Expr *checkexpr; >> Expr *eval_expr = NULL; >> Const *constant; >> - long part_value; >> - long *part_value_ptr = NULL; >> >> eval_expr = (Expr *) eval_const_expressions(NULL, (Node *)expr); >> checkexpr = get_numeric_constant(eval_expr); >> @@ -3389,17 +3394,8 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, >> ExecNodes **exec_nodes, const Ex >> >> constant = (Const *) checkexpr; >> >> - if (constant->consttype == INT4OID || >> - constant->consttype == INT2OID || >> - constant->consttype == INT8OID) >> - { >> - part_value = (long) constant->constvalue; >> - part_value_ptr = &part_value; >> - } >> - >> /* single call handles both replicated and partitioned types */ >> - *exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr, >> - >> RELATION_ACCESS_INSERT); >> + *exec_nodes = GetRelationNodes(rel_loc_info, constant->constvalue, >> constant->consttype, RELATION_ACCESS_INSERT); >> if (eval_expr) >> pfree(eval_expr); >> >> diff --git a/src/backend/pgxc/pool/execRemote.c >> b/src/backend/pgxc/pool/execRemote.c >> index 75aca21..76e3eef 100644 >> --- a/src/backend/pgxc/pool/execRemote.c >> +++ b/src/backend/pgxc/pool/execRemote.c >> @@ -1061,7 +1061,8 @@ BufferConnection(PGXCNodeHandle *conn) >> RemoteQueryState *combiner = conn->combiner; >> MemoryContext oldcontext; >> >> - Assert(conn->state == DN_CONNECTION_STATE_QUERY && combiner); >> + if (combiner == NULL || conn->state != DN_CONNECTION_STATE_QUERY) >> + return; >> >> /* >> * When BufferConnection is invoked CurrentContext is related to >> other >> @@ -3076,9 +3077,8 @@ get_exec_connections(RemoteQueryState *planstate, >> if (!isnull) >> { >> RelationLocInfo *rel_loc_info = >> GetRelationLocInfo(exec_nodes->relid); >> - ExecNodes *nodes = >> GetRelationNodes(rel_loc_info, >> - >> (long *) &partvalue, >> - >> exec_nodes->accesstype); >> + /* PGXCTODO what is the type of >> partvalue here*/ >> + ExecNodes *nodes = >> GetRelationNodes(rel_loc_info, partvalue, UNKNOWNOID, >> exec_nodes->accesstype); >> if (nodes) >> { >> nodelist = nodes->nodelist; >> diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c >> index 415fc47..6d7939b 100644 >> --- a/src/backend/tcop/postgres.c >> +++ b/src/backend/tcop/postgres.c >> @@ -670,18 +670,18 @@ pg_analyze_and_rewrite(Node *parsetree, const char >> *query_string, >> querytree_list = pg_rewrite_query(query); >> >> #ifdef PGXC >> - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) >> - { >> - ListCell *lc; >> - >> - foreach(lc, querytree_list) >> - { >> - Query *query = (Query *) lfirst(lc); >> - >> - if (query->sql_statement == NULL) >> - query->sql_statement = pstrdup(query_string); >> - } >> - } >> + if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) >> + { >> + ListCell *lc; >> + >> + foreach(lc, querytree_list) >> + { >> + Query *query = (Query *) lfirst(lc); >> + >> + if (query->sql_statement == NULL) >> + query->sql_statement = >> pstrdup(query_string); >> + } >> + } >> #endif >> >> TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string); >> @@ -1043,7 +1043,7 @@ exec_simple_query(const char *query_string) >> >> querytree_list = pg_analyze_and_rewrite(parsetree, >> query_string, >> >> NULL, 0); >> - >> + >> plantree_list = pg_plan_queries(querytree_list, 0, NULL); >> >> /* Done with the snapshot used for parsing/planning */ >> diff --git 
a/src/include/access/hash.h b/src/include/access/hash.h >> index d5899f4..4aaffaa 100644 >> --- a/src/include/access/hash.h >> +++ b/src/include/access/hash.h >> @@ -353,4 +353,8 @@ extern OffsetNumber _hash_binsearch_last(Page page, >> uint32 hash_value); >> extern void hash_redo(XLogRecPtr lsn, XLogRecord *record); >> extern void hash_desc(StringInfo buf, uint8 xl_info, char *rec); >> >> +#ifdef PGXC >> +extern Datum compute_hash(Oid type, Datum value, int *pErr); >> +#endif >> + >> #endif /* HASH_H */ >> diff --git a/src/include/pgxc/locator.h b/src/include/pgxc/locator.h >> index 9f669d9..9ee983c 100644 >> --- a/src/include/pgxc/locator.h >> +++ b/src/include/pgxc/locator.h >> @@ -100,8 +100,7 @@ extern char ConvertToLocatorType(int disttype); >> extern char *GetRelationHashColumn(RelationLocInfo *rel_loc_info); >> extern RelationLocInfo *GetRelationLocInfo(Oid relid); >> extern RelationLocInfo *CopyRelationLocInfo(RelationLocInfo *src_info); >> -extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, long >> *partValue, >> - RelationAccessType accessType); >> +extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, Datum >> valueForDistCol, Oid typeOfValueForDistCol, RelationAccessType accessType); >> extern bool IsHashColumn(RelationLocInfo *rel_loc_info, char >> *part_col_name); >> extern bool IsHashColumnForRelId(Oid relid, char *part_col_name); >> extern int GetRoundRobinNode(Oid relid); >> diff --git a/src/test/regress/expected/create_index_1.out >> b/src/test/regress/expected/create_index_1.out >> index 52fdc91..ab3807c 100644 >> --- a/src/test/regress/expected/create_index_1.out >> +++ b/src/test/regress/expected/create_index_1.out >> @@ -174,15 +174,10 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, >> -12)'; >> SET enable_seqscan = OFF; >> SET enable_indexscan = ON; >> SET enable_bitmapscan = ON; >> -EXPLAIN (COSTS OFF) >> -SELECT * FROM fast_emp4000 >> - WHERE home_base @ '(200,200),(2000,1000)'::box >> - ORDER BY (home_base[0])[0]; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> >> -(1 row) >> - >> +--EXPLAIN (COSTS OFF) >> +--SELECT * FROM fast_emp4000 >> +-- WHERE home_base @ '(200,200),(2000,1000)'::box >> +-- ORDER BY (home_base[0])[0]; >> SELECT * FROM fast_emp4000 >> WHERE home_base @ '(200,200),(2000,1000)'::box >> ORDER BY (home_base[0])[0]; >> @@ -190,40 +185,25 @@ SELECT * FROM fast_emp4000 >> ----------- >> (0 rows) >> >> -EXPLAIN (COSTS OFF) >> -SELECT count(*) FROM fast_emp4000 WHERE home_base && >> '(1000,1000,0,0)'::box; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> >> -(1 row) >> - >> +--EXPLAIN (COSTS OFF) >> +--SELECT count(*) FROM fast_emp4000 WHERE home_base && >> '(1000,1000,0,0)'::box; >> SELECT count(*) FROM fast_emp4000 WHERE home_base && >> '(1000,1000,0,0)'::box; >> count >> ------- >> 1 >> (1 row) >> >> -EXPLAIN (COSTS OFF) >> -SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> >> -(1 row) >> - >> +--EXPLAIN (COSTS OFF) >> +--SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; >> SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; >> count >> ------- >> 138 >> (1 row) >> >> -EXPLAIN (COSTS OFF) >> -SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon >> - ORDER BY (poly_center(f1))[0]; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> >> -(1 row) >> - >> +--EXPLAIN (COSTS OFF) >> +--SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon >> +-- ORDER BY (poly_center(f1))[0]; >> 
SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon >> ORDER BY (poly_center(f1))[0]; >> id | f1 >> @@ -231,14 +211,9 @@ SELECT * FROM polygon_tbl WHERE f1 ~ >> '((1,1),(2,2),(2,1))'::polygon >> 1 | ((2,0),(2,4),(0,0)) >> (1 row) >> >> -EXPLAIN (COSTS OFF) >> -SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) >> - ORDER BY area(f1); >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> >> -(1 row) >> - >> +--EXPLAIN (COSTS OFF) >> +--SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) >> +-- ORDER BY area(f1); >> SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) >> ORDER BY area(f1); >> f1 >> @@ -269,9 +244,9 @@ LINE 1: SELECT count(*) FROM gcircle_tbl WHERE f1 && >> '<(500,500),500... >> ^ >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; >> @@ -282,9 +257,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ box >> '(0,0,100,100)'; >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; >> @@ -295,9 +270,9 @@ SELECT count(*) FROM point_tbl WHERE box >> '(0,0,100,100)' @> f1; >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl WHERE f1 <@ polygon >> '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl WHERE f1 <@ polygon >> '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; >> @@ -308,9 +283,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ polygon >> '(0,0),(0,100),(100,100),(50, >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; >> @@ -321,9 +296,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ circle >> '<(50,50),50>'; >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; >> @@ -334,9 +309,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, >> 0.0)'; >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; >> @@ -347,9 +322,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, >> 0.0)'; >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node 
Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; >> @@ -360,9 +335,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, >> 0.0)'; >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; >> @@ -373,9 +348,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, >> 0.0)'; >> >> EXPLAIN (COSTS OFF) >> SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; >> - QUERY PLAN >> ----------------- >> - Data Node Scan >> + QUERY PLAN >> >> +--------------------------------- >> + Data Node Scan (Node Count [1]) >> (1 row) >> >> SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; >> @@ -774,7 +749,7 @@ CREATE INDEX hash_f8_index ON hash_f8_heap USING hash >> (random float8_ops); >> -- >> CREATE TABLE func_index_heap (f1 text, f2 text); >> CREATE UNIQUE INDEX func_index_index on func_index_heap (textcat(f1,f2)); >> -ERROR: Cannot locally enforce a unique index on round robin distributed >> table. >> +ERROR: Unique index of partitioned table must contain the hash/modulo >> distribution column. >> INSERT INTO func_index_heap VALUES('ABC','DEF'); >> INSERT INTO func_index_heap VALUES('AB','CDEFG'); >> INSERT INTO func_index_heap VALUES('QWE','RTY'); >> @@ -788,7 +763,7 @@ INSERT INTO func_index_heap VALUES('QWERTY'); >> DROP TABLE func_index_heap; >> CREATE TABLE func_index_heap (f1 text, f2 text); >> CREATE UNIQUE INDEX func_index_index on func_index_heap ((f1 || f2) >> text_ops); >> -ERROR: Cannot locally enforce a unique index on round robin distributed >> table. >> +ERROR: Unique index of partitioned table must contain the hash/modulo >> distribution column. 
>> INSERT INTO func_index_heap VALUES('ABC','DEF'); >> INSERT INTO func_index_heap VALUES('AB','CDEFG'); >> INSERT INTO func_index_heap VALUES('QWE','RTY'); >> diff --git a/src/test/regress/expected/float4_1.out >> b/src/test/regress/expected/float4_1.out >> index 432d159..f50147d 100644 >> --- a/src/test/regress/expected/float4_1.out >> +++ b/src/test/regress/expected/float4_1.out >> @@ -125,16 +125,6 @@ SELECT 'nan'::numeric::float4; >> NaN >> (1 row) >> >> -SELECT '' AS five, * FROM FLOAT4_TBL; >> - five | f1 >> -------+------------- >> - | 1004.3 >> - | 1.23457e+20 >> - | 0 >> - | -34.84 >> - | 1.23457e-20 >> -(5 rows) >> - >> SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; >> five | f1 >> ------+------------- >> @@ -257,13 +247,14 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM >> FLOAT4_TBL f ORDER BY f1; >> UPDATE FLOAT4_TBL >> SET f1 = FLOAT4_TBL.f1 * '-1' >> WHERE FLOAT4_TBL.f1 > '0.0'; >> +ERROR: Partition column can't be updated in current version >> SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; >> - five | f1 >> -------+-------------- >> - | -1.23457e+20 >> - | -1004.3 >> - | -34.84 >> - | -1.23457e-20 >> - | 0 >> + five | f1 >> +------+------------- >> + | -34.84 >> + | 0 >> + | 1.23457e-20 >> + | 1004.3 >> + | 1.23457e+20 >> (5 rows) >> >> diff --git a/src/test/regress/expected/float8_1.out >> b/src/test/regress/expected/float8_1.out >> index 65fe187..8ce7930 100644 >> --- a/src/test/regress/expected/float8_1.out >> +++ b/src/test/regress/expected/float8_1.out >> @@ -381,6 +381,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; >> UPDATE FLOAT8_TBL >> SET f1 = FLOAT8_TBL.f1 * '-1' >> WHERE FLOAT8_TBL.f1 > '0.0'; >> +ERROR: Partition column can't be updated in current version >> SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1; >> ERROR: value out of range: overflow >> SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1; >> @@ -396,17 +397,17 @@ ERROR: cannot take logarithm of zero >> SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0'; >> ERROR: cannot take logarithm of a negative number >> SELECT '' AS bad, exp(f.f1) from FLOAT8_TBL f ORDER BY f1; >> -ERROR: value out of range: underflow >> +ERROR: value out of range: overflow >> SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f; >> ERROR: division by zero >> SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; >> - five | f1 >> -------+----------------------- >> - | -1.2345678901234e+200 >> - | -1004.3 >> - | -34.84 >> - | -1.2345678901234e-200 >> - | 0 >> + five | f1 >> +------+---------------------- >> + | -34.84 >> + | 0 >> + | 1.2345678901234e-200 >> + | 1004.3 >> + | 1.2345678901234e+200 >> (5 rows) >> >> -- test for over- and underflow >> diff --git a/src/test/regress/expected/foreign_key_1.out >> b/src/test/regress/expected/foreign_key_1.out >> index 7eccdc6..3cb7d17 100644 >> --- a/src/test/regress/expected/foreign_key_1.out >> +++ b/src/test/regress/expected/foreign_key_1.out >> @@ -773,9 +773,9 @@ INSERT INTO FKTABLE VALUES(43); -- should >> fail >> ERROR: insert or update on table "fktable" violates foreign key >> constraint "fktable_ftest1_fkey" >> DETAIL: Key (ftest1)=(43) is not present in table "pktable". >> UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed >> +ERROR: Partition column can't be updated in current version >> UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail >> -ERROR: insert or update on table "fktable" violates foreign key >> constraint "fktable_ftest1_fkey" >> -DETAIL: Key (ftest1)=(43) is not present in table "pktable". 
>> +ERROR: Partition column can't be updated in current version >> DROP TABLE FKTABLE; >> -- This should fail, because we'd have to cast numeric to int which is >> -- not an implicit coercion (or use numeric=numeric, but that's not part >> @@ -787,34 +787,22 @@ DROP TABLE PKTABLE; >> -- On the other hand, this should work because int implicitly promotes to >> -- numeric, and we allow promotion on the FK side >> CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY); >> -ERROR: Column ptest1 is not a hash distributable data type >> +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index >> "pktable_pkey" for table "pktable" >> INSERT INTO PKTABLE VALUES(42); >> -ERROR: relation "pktable" does not exist >> -LINE 1: INSERT INTO PKTABLE VALUES(42); >> - ^ >> CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable); >> -ERROR: relation "pktable" does not exist >> -- Check it actually works >> INSERT INTO FKTABLE VALUES(42); -- should succeed >> -ERROR: relation "fktable" does not exist >> -LINE 1: INSERT INTO FKTABLE VALUES(42); >> - ^ >> +ERROR: insert or update on table "fktable" violates foreign key >> constraint "fktable_ftest1_fkey" >> +DETAIL: Key (ftest1)=(42) is not present in table "pktable". >> INSERT INTO FKTABLE VALUES(43); -- should fail >> -ERROR: relation "fktable" does not exist >> -LINE 1: INSERT INTO FKTABLE VALUES(43); >> - ^ >> +ERROR: insert or update on table "fktable" violates foreign key >> constraint "fktable_ftest1_fkey" >> +DETAIL: Key (ftest1)=(43) is not present in table "pktable". >> UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed >> -ERROR: relation "fktable" does not exist >> -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1; >> - ^ >> +ERROR: Partition column can't be updated in current version >> UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail >> -ERROR: relation "fktable" does not exist >> -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1 + 1; >> - ^ >> +ERROR: Partition column can't be updated in current version >> DROP TABLE FKTABLE; >> -ERROR: table "fktable" does not exist >> DROP TABLE PKTABLE; >> -ERROR: table "pktable" does not exist >> -- Two columns, two tables >> CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, >> ptest2)); >> NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index >> "pktable_pkey" for table "pktable" >> diff --git a/src/test/regress/expected/money_1.out >> b/src/test/regress/expected/money_1.out >> new file mode 100644 >> index 0000000..6a15792 >> --- /dev/null >> +++ b/src/test/regress/expected/money_1.out >> @@ -0,0 +1,186 @@ >> +-- >> +-- MONEY >> +-- >> +CREATE TABLE money_data (m money); >> +INSERT INTO money_data VALUES ('123'); >> +SELECT * FROM money_data; >> + m >> +--------- >> + $123.00 >> +(1 row) >> + >> +SELECT m + '123' FROM money_data; >> + ?column? >> +---------- >> + $246.00 >> +(1 row) >> + >> +SELECT m + '123.45' FROM money_data; >> + ?column? >> +---------- >> + $246.45 >> +(1 row) >> + >> +SELECT m - '123.45' FROM money_data; >> + ?column? >> +---------- >> + -$0.45 >> +(1 row) >> + >> +SELECT m * 2 FROM money_data; >> + ?column? >> +---------- >> + $246.00 >> +(1 row) >> + >> +SELECT m / 2 FROM money_data; >> + ?column? >> +---------- >> + $61.50 >> +(1 row) >> + >> +-- All true >> +SELECT m = '$123.00' FROM money_data; >> + ?column? >> +---------- >> + t >> +(1 row) >> + >> +SELECT m != '$124.00' FROM money_data; >> + ?column? >> +---------- >> + t >> +(1 row) >> + >> +SELECT m <= '$123.00' FROM money_data; >> + ?column? 
>> +---------- >> + t >> +(1 row) >> + >> +SELECT m >= '$123.00' FROM money_data; >> + ?column? >> +---------- >> + t >> +(1 row) >> + >> +SELECT m < '$124.00' FROM money_data; >> + ?column? >> +---------- >> + t >> +(1 row) >> + >> +SELECT m > '$122.00' FROM money_data; >> + ?column? >> +---------- >> + t >> +(1 row) >> + >> +-- All false >> +SELECT m = '$123.01' FROM money_data; >> + ?column? >> +---------- >> +(0 rows) >> + >> +SELECT m != '$123.00' FROM money_data; >> + ?column? >> +---------- >> + f >> +(1 row) >> + >> +SELECT m <= '$122.99' FROM money_data; >> + ?column? >> +---------- >> + f >> +(1 row) >> + >> +SELECT m >= '$123.01' FROM money_data; >> + ?column? >> +---------- >> + f >> +(1 row) >> + >> +SELECT m > '$124.00' FROM money_data; >> + ?column? >> +---------- >> + f >> +(1 row) >> + >> +SELECT m < '$122.00' FROM money_data; >> + ?column? >> +---------- >> + f >> +(1 row) >> + >> +SELECT cashlarger(m, '$124.00') FROM money_data; >> + cashlarger >> +------------ >> + $124.00 >> +(1 row) >> + >> +SELECT cashsmaller(m, '$124.00') FROM money_data; >> + cashsmaller >> +------------- >> + $123.00 >> +(1 row) >> + >> +SELECT cash_words(m) FROM money_data; >> + cash_words >> +------------------------------------------------- >> + One hundred twenty three dollars and zero cents >> +(1 row) >> + >> +SELECT cash_words(m + '1.23') FROM money_data; >> + cash_words >> +-------------------------------------------------------- >> + One hundred twenty four dollars and twenty three cents >> +(1 row) >> + >> +DELETE FROM money_data; >> +INSERT INTO money_data VALUES ('$123.45'); >> +SELECT * FROM money_data; >> + m >> +--------- >> + $123.45 >> +(1 row) >> + >> +DELETE FROM money_data; >> +INSERT INTO money_data VALUES ('$123.451'); >> +SELECT * FROM money_data; >> + m >> +--------- >> + $123.45 >> +(1 row) >> + >> +DELETE FROM money_data; >> +INSERT INTO money_data VALUES ('$123.454'); >> +SELECT * FROM money_data; >> + m >> +--------- >> + $123.45 >> +(1 row) >> + >> +DELETE FROM money_data; >> +INSERT INTO money_data VALUES ('$123.455'); >> +SELECT * FROM money_data; >> + m >> +--------- >> + $123.46 >> +(1 row) >> + >> +DELETE FROM money_data; >> +INSERT INTO money_data VALUES ('$123.456'); >> +SELECT * FROM money_data; >> + m >> +--------- >> + $123.46 >> +(1 row) >> + >> +DELETE FROM money_data; >> +INSERT INTO money_data VALUES ('$123.459'); >> +SELECT * FROM money_data; >> + m >> +--------- >> + $123.46 >> +(1 row) >> + >> diff --git a/src/test/regress/expected/prepared_xacts_2.out >> b/src/test/regress/expected/prepared_xacts_2.out >> index e456200..307ffad 100644 >> --- a/src/test/regress/expected/prepared_xacts_2.out >> +++ b/src/test/regress/expected/prepared_xacts_2.out >> @@ -6,7 +6,7 @@ >> -- isn't really needed ... stopping and starting the postmaster would >> -- be enough, but we can't even do that here. 
>> -- create a simple table that we'll use in the tests >> -CREATE TABLE pxtest1 (foobar VARCHAR(10)); >> +CREATE TABLE pxtest1 (foobar VARCHAR(10)) distribute by replication; >> INSERT INTO pxtest1 VALUES ('aaa'); >> -- Test PREPARE TRANSACTION >> BEGIN; >> diff --git a/src/test/regress/expected/reltime_1.out >> b/src/test/regress/expected/reltime_1.out >> new file mode 100644 >> index 0000000..83f61f9 >> --- /dev/null >> +++ b/src/test/regress/expected/reltime_1.out >> @@ -0,0 +1,109 @@ >> +-- >> +-- RELTIME >> +-- >> +CREATE TABLE RELTIME_TBL (f1 reltime); >> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 1 minute'); >> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 5 hour'); >> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 10 day'); >> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 34 year'); >> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 3 months'); >> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 14 seconds ago'); >> +-- badly formatted reltimes >> +INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltime'); >> +ERROR: invalid input syntax for type reltime: "badly formatted reltime" >> +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltim... >> + ^ >> +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); >> +ERROR: invalid input syntax for type reltime: "@ 30 eons ago" >> +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); >> + ^ >> +-- test reltime operators >> +SELECT '' AS six, * FROM RELTIME_TBL ORDER BY f1; >> + six | f1 >> +-----+--------------- >> + | @ 14 secs ago >> + | @ 1 min >> + | @ 5 hours >> + | @ 10 days >> + | @ 3 mons >> + | @ 34 years >> +(6 rows) >> + >> +SELECT '' AS five, * FROM RELTIME_TBL >> + WHERE RELTIME_TBL.f1 <> reltime '@ 10 days' ORDER BY f1; >> + five | f1 >> +------+--------------- >> + | @ 14 secs ago >> + | @ 1 min >> + | @ 5 hours >> + | @ 3 mons >> + | @ 34 years >> +(5 rows) >> + >> +SELECT '' AS three, * FROM RELTIME_TBL >> + WHERE RELTIME_TBL.f1 <= reltime '@ 5 hours' ORDER BY f1; >> + three | f1 >> +-------+--------------- >> + | @ 14 secs ago >> + | @ 1 min >> + | @ 5 hours >> +(3 rows) >> + >> +SELECT '' AS three, * FROM RELTIME_TBL >> + WHERE RELTIME_TBL.f1 < reltime '@ 1 day' ORDER BY f1; >> + three | f1 >> +-------+--------------- >> + | @ 14 secs ago >> + | @ 1 min >> + | @ 5 hours >> +(3 rows) >> + >> +SELECT '' AS one, * FROM RELTIME_TBL >> + WHERE RELTIME_TBL.f1 = reltime '@ 34 years' ORDER BY f1; >> + one | f1 >> +-----+---------- >> + | 34 years >> +(1 row) >> + >> +SELECT '' AS two, * FROM RELTIME_TBL >> + WHERE RELTIME_TBL.f1 >= reltime '@ 1 month' ORDER BY f1; >> + two | f1 >> +-----+------------ >> + | @ 3 mons >> + | @ 34 years >> +(2 rows) >> + >> +SELECT '' AS five, * FROM RELTIME_TBL >> + WHERE RELTIME_TBL.f1 > reltime '@ 3 seconds ago' ORDER BY f1; >> + five | f1 >> +------+------------ >> + | @ 1 min >> + | @ 5 hours >> + | @ 10 days >> + | @ 3 mons >> + | @ 34 years >> +(5 rows) >> + >> +SELECT '' AS fifteen, r1.*, r2.* >> + FROM RELTIME_TBL r1, RELTIME_TBL r2 >> + WHERE r1.f1 > r2.f1 >> + ORDER BY r1.f1, r2.f1; >> + fifteen | f1 | f1 >> +---------+------------+--------------- >> + | @ 1 min | @ 14 secs ago >> + | @ 5 hours | @ 14 secs ago >> + | @ 5 hours | @ 1 min >> + | @ 10 days | @ 14 secs ago >> + | @ 10 days | @ 1 min >> + | @ 10 days | @ 5 hours >> + | @ 3 mons | @ 14 secs ago >> + | @ 3 mons | @ 1 min >> + | @ 3 mons | @ 5 hours >> + | @ 3 mons | @ 10 days >> + | @ 34 years | @ 14 secs ago >> + | @ 34 years | @ 1 min >> + | @ 34 years | @ 5 hours >> + | @ 34 years | @ 10 days >> + | @ 34 years | @ 3 mons >> 
+(15 rows) >> + >> diff --git a/src/test/regress/expected/triggers_1.out >> b/src/test/regress/expected/triggers_1.out >> index 5528c66..a9f83ec 100644 >> --- a/src/test/regress/expected/triggers_1.out >> +++ b/src/test/regress/expected/triggers_1.out >> @@ -717,30 +717,30 @@ ERROR: Postgres-XC does not support TRIGGER yet >> DETAIL: The feature is not currently supported >> \set QUIET false >> UPDATE min_updates_test SET f1 = f1; >> -UPDATE 2 >> -UPDATE min_updates_test SET f2 = f2 + 1; >> ERROR: Partition column can't be updated in current version >> +UPDATE min_updates_test SET f2 = f2 + 1; >> +UPDATE 2 >> UPDATE min_updates_test SET f3 = 2 WHERE f3 is null; >> UPDATE 1 >> UPDATE min_updates_test_oids SET f1 = f1; >> -UPDATE 2 >> -UPDATE min_updates_test_oids SET f2 = f2 + 1; >> ERROR: Partition column can't be updated in current version >> +UPDATE min_updates_test_oids SET f2 = f2 + 1; >> +UPDATE 2 >> UPDATE min_updates_test_oids SET f3 = 2 WHERE f3 is null; >> UPDATE 1 >> \set QUIET true >> SELECT * FROM min_updates_test ORDER BY 1,2,3; >> f1 | f2 | f3 >> ----+----+---- >> - a | 1 | 2 >> - b | 2 | 2 >> + a | 2 | 2 >> + b | 3 | 2 >> (2 rows) >> >> SELECT * FROM min_updates_test_oids ORDER BY 1,2,3; >> f1 | f2 | f3 >> ----+----+---- >> - a | 1 | 2 >> - b | 2 | 2 >> + a | 2 | 2 >> + b | 3 | 2 >> (2 rows) >> >> DROP TABLE min_updates_test; >> diff --git a/src/test/regress/expected/tsearch_1.out >> b/src/test/regress/expected/tsearch_1.out >> index e8c35d4..4d1f1b1 100644 >> --- a/src/test/regress/expected/tsearch_1.out >> +++ b/src/test/regress/expected/tsearch_1.out >> @@ -801,7 +801,7 @@ SELECT COUNT(*) FROM test_tsquery WHERE keyword > >> 'new & york'; >> (1 row) >> >> CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword); >> -ERROR: Cannot locally enforce a unique index on round robin distributed >> table. >> +ERROR: Unique index of partitioned table must contain the hash/modulo >> distribution column. >> SET enable_seqscan=OFF; >> SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new & york'; >> count >> @@ -1054,6 +1054,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ >> to_tsquery('345&qwerty'); >> (0 rows) >> >> UPDATE test_tsvector SET t = null WHERE t = '345 qwerty'; >> +ERROR: Partition column can't be updated in current version >> SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); >> count >> ------- >> diff --git a/src/test/regress/expected/xc_distkey.out >> b/src/test/regress/expected/xc_distkey.out >> new file mode 100644 >> index 0000000..d050b27 >> --- /dev/null >> +++ b/src/test/regress/expected/xc_distkey.out >> @@ -0,0 +1,618 @@ >> +-- XC Test cases to verify that all supported data types are working as >> distribution key >> +-- Also verifies that the comaparison with a constant for equality is >> optimized. >> +create table ch_tab(a char) distribute by modulo(a); >> +insert into ch_tab values('a'); >> +select hashchar('a'); >> + hashchar >> +----------- >> + 463612535 >> +(1 row) >> + >> +create table nm_tab(a name) distribute by modulo(a); >> +insert into nm_tab values('abbas'); >> +select hashname('abbas'); >> + hashname >> +----------- >> + 605752656 >> +(1 row) >> + >> +create table nu_tab(a numeric(10,5)) distribute by modulo(a); >> +insert into nu_tab values(123.456); >> +insert into nu_tab... [truncated message content] |
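Andrei's even/odd concern above is easy to quantify: when compute_hash() returns the raw integer, modulo distribution degenerates to value % nnodes, so any regularity in the key values maps rows onto a subset of nodes, while a real hash destroys that regularity first. A small self-contained C demonstration (the mix() function below is a generic avalanche-style mixer standing in for hashint4, not the actual Postgres hash):

    #include <stdio.h>

    #define NNODES 4

    /* Generic 32-bit integer mixer; any avalanche-quality hash shows the same effect. */
    static unsigned mix(unsigned x)
    {
        x ^= x >> 16; x *= 0x45d9f3bU;
        x ^= x >> 16; x *= 0x45d9f3bU;
        x ^= x >> 16;
        return x;
    }

    int main(void)
    {
        int raw[NNODES] = {0}, hashed[NNODES] = {0};
        unsigned k;

        /* all-even key values, as in the even/odd example */
        for (k = 0; k < 1000; k++)
        {
            unsigned v = 2 * k;
            raw[v % NNODES]++;          /* compute_hash() returning the value itself */
            hashed[mix(v) % NNODES]++;  /* the value run through a real hash first */
        }
        for (k = 0; k < NNODES; k++)
            printf("node %u: raw=%d hashed=%d\n", k, raw[k], hashed[k]);
        /* raw lands only on nodes 0 and 2; hashed spreads over all four */
        return 0;
    }

Whether hash distribution is immune depends on what hash_range_int() does with the raw value it receives; the robust fix Andrei suggests is to route the integer-family types through hashint2/hashint4/hashint8 inside compute_hash() itself.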
From: Koichi S. <koi...@us...> - 2011-05-25 09:14:29
|
Project "Postgres-XC". The branch, ha_support has been updated via a34aef207659537b5ee51038cfe30fe8d34a4b8a (commit) from 5e1f7db50172e18a081c9b8155399cd8e8057101 (commit) - Log ----------------------------------------------------------------- commit a34aef207659537b5ee51038cfe30fe8d34a4b8a Author: Koichi Suzuki <koi...@gm...> Date: Wed May 25 17:59:01 2011 +0900 This commit adds main thread reconnect to the new GTM. It enables the main thread to reconnect and register itself first, so that worker threads can then reconnect. The whole reconnect procedure is very complicated and needs review before testing. Affected file is: modified: proxy_main.c diff --git a/src/gtm/proxy/proxy_main.c b/src/gtm/proxy/proxy_main.c index c93b140..183a877 100644 --- a/src/gtm/proxy/proxy_main.c +++ b/src/gtm/proxy/proxy_main.c @@ -41,6 +41,8 @@ #include "gtm/libpq-int.h" #include "gtm/gtm_ip.h" #include "gtm/gtm_standby.h" +/* For reconnect control lock */ +#include "gtm/gtm_lock.h" extern int optind; extern char *optarg; @@ -78,6 +80,12 @@ short ReadyToReconnect = FALSE; char *NewGTMServerHost; int NewGTMServerPortNumber; +/* Reconnect Control Lock */ + +GTM_RWLock ReconnectControlLock; +jmp_buf mainThreadSIGUSR1_buf; +int SIGUSR1Accepted = FALSE; + /* The socket(s) we're listening to. */ #define MAXLISTEN 64 static int ListenSocket[MAXLISTEN]; @@ -199,6 +207,10 @@ BaseInit() sprintf(GTMLogFile, "%s/%s", GTMProxyDataDir, GTM_LOG_FILE); } + /* Initialize reconnect control lock */ + + GTM_RWLockInit(&ReconnectControlLock); + /* Save Node Register File in register.c */ Recovery_SaveRegisterFileName(GTMProxyDataDir); @@ -391,6 +403,9 @@ GTMProxy_SigleHandler(int signal) PG_SETMASK(&UnBlockSig); return; } + /* + * Send SIGUSR2 to all worker threads. + */ for (ii = 0; ii < GTMProxyWorkerThreads; ii++) { if ((Proxy_ThreadInfo[ii] == NULL) || (Proxy_ThreadInfo[ii]->can_accept_SIGUSR2 == FALSE)) @@ -399,6 +414,17 @@ GTMProxy_SigleHandler(int signal) PG_SETMASK(&UnBlockSig); } } + /* + * Before sending SIGUSR2 to worker threads, acquire the reconnect control lock in write mode + * so that worker threads wait until the main thread reconnects to the new GTM and registers + * itself. + */ + GTM_RWLockAcquire(&ReconnectControlLock, GTM_LOCKMODE_WRITE); + /* + * We cannot accept the next SIGUSR1 until all the reconnect is finished. + */ + ReadyToReconnect = FALSE; + for (ii = 0; ii < GTMProxyWorkerThreads; ii++) { /* @@ -409,7 +435,12 @@ GTMProxy_SigleHandler(int signal) } elog(LOG, "SIGUSR2 issued to all the worker threads."); PG_SETMASK(&UnBlockSig); - return; + /* + * Note that during connection handling with backends, signals are blocked + * so it is safe to longjump here. + */ + siglongjmp(mainThreadSIGUSR1_buf, 1); + case SIGUSR2: /* Reconnect from the main thread */ /* * Main thread has nothing to do with this signal and should not receive this. */ @@ -684,11 +715,6 @@ main(int argc, char *argv[]) } } - /* - * Now all the worker threads are ready and the proxy can accept SIGUSR1 to reconnect - */ - - ReadyToReconnect = TRUE; /* * Accept any new connections. Add for each incoming connection to one of @@ -753,11 +779,40 @@ ServerLoop(void) nSockets = initMasks(&readmask); + for (;;) { fd_set rmask; int selres; + if (sigsetjmp(mainThreadSIGUSR1_buf, 1) != 0) + { + /* + * Reconnect! + * We use the RegisterProxy() call. Before this, we change the connection information + * of GTM to the new one. + * Because this is done while ReconnectControlLock is acquired, + * worker threads can use this change and they don't have to worry about + * the new connection point. 
+ * + * Because we leave the old socket as is, there could be some waste of + * the resource, but this should not happen very often. + */ + free(GTMServerHost); + GTMServerHost = NewGTMServerHost; + GTMServerPortNumber = NewGTMServerPortNumber; + RegisterProxy(); + /* + * When this is done, release the lock for the worker threads. + */ + GTM_RWLockRelease(&ReconnectControlLock); + } + /* + * We should delay the point where we accept a reconnect until here because + * the longjmp buffer has not been prepared before this. + */ + ReadyToReconnect = TRUE; + /* * Wait for a connection request to arrive. * @@ -1110,7 +1165,7 @@ GTMProxy_ThreadMain(void *argp) * Each SIGUSR2 should return here and please note that from the beginning * of the outer loop, longjmp is disabled and the signal handler will simply return * so that we don't have to be bothered with the memory context. We should be - * sure to be in MemoryContext where longjmp() is issued. + * sure to be in MemoryContext where siglongjmp() is issued. */ setjmp_again: if (sigsetjmp(GetMyThreadInfo->longjmp_env, 1) == 0) @@ -1122,6 +1177,10 @@ /* * SIGUSR2 is detected and jumped here */ + /* + * First of all, we should acquire the reconnect control lock in READ mode. + */ + GTM_RWLockAcquire(&ReconnectControlLock, GTM_LOCKMODE_READ); PG_SETMASK(&UnBlockSig); /* * Disconnect the current connection and re-connect to the new GTM @@ -1152,6 +1211,10 @@ } gtm_list_free_deep(thrinfo->thr_processed_commands); thrinfo->thr_processed_commands = gtm_NIL; + /* + * Release the reconnect control lock + */ + GTM_RWLockRelease(&ReconnectControlLock); goto setjmp_again; /* Get ready for another SIGUSR2 */ } ----------------------------------------------------------------------- Summary of changes: src/gtm/proxy/proxy_main.c | 77 ++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 70 insertions(+), 7 deletions(-) hooks/post-receive -- Postgres-XC |
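The write-then-read lock handoff in the commit above can be sketched on its own. The following is a minimal, stand-alone illustration only: it uses plain POSIX pthreads rather than the project's GTM_RWLock wrapper, and every name in it is invented for the example. The main thread holds the lock in write mode while it re-registers (RegisterProxy() in the real code), so no worker can pass its read-mode acquire until registration is complete.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t reconnect_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *worker(void *arg)
{
    long id = (long) arg;

    /* Blocks until the main thread releases its write lock. */
    pthread_rwlock_rdlock(&reconnect_lock);
    printf("worker %ld: main thread has registered, reconnecting now\n", id);
    pthread_rwlock_unlock(&reconnect_lock);
    return NULL;
}

int main(void)
{
    pthread_t tids[3];
    long      ii;

    /* Main thread must reconnect and register first: take write mode. */
    pthread_rwlock_wrlock(&reconnect_lock);

    for (ii = 0; ii < 3; ii++)
        pthread_create(&tids[ii], NULL, worker, (void *) ii);

    printf("main: reconnecting to the new GTM and registering...\n");
    sleep(1);                                 /* stand-in for RegisterProxy() */
    pthread_rwlock_unlock(&reconnect_lock);   /* now the workers may proceed */

    for (ii = 0; ii < 3; ii++)
        pthread_join(tids[ii], NULL);
    return 0;
}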
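The same commit also relies on the sigsetjmp()/siglongjmp() pattern guarded by the ReadyToReconnect flag: the jump may only be taken once the jump buffer has been set up inside the loop. Again a stand-alone sketch with invented names, not the proxy code itself:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sigjmp_buf loop_buf;
static volatile sig_atomic_t ready_to_reconnect = 0;

static void on_sigusr1(int signo)
{
    (void) signo;
    if (ready_to_reconnect)
        siglongjmp(loop_buf, 1);    /* unwind back into the server loop */
}

int main(void)
{
    struct sigaction sa;

    sa.sa_handler = on_sigusr1;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sigaction(SIGUSR1, &sa, NULL);

    printf("pid %ld: send SIGUSR1 to simulate a reconnect request\n",
           (long) getpid());

    for (;;)
    {
        if (sigsetjmp(loop_buf, 1) != 0)
            printf("reconnect request handled, resuming the loop\n");

        /*
         * Accept the jump only once the buffer above is valid; this is
         * the role the ReadyToReconnect flag plays in the real code.
         */
        ready_to_reconnect = 1;

        pause();    /* stand-in for select() on the listen sockets */
    }
}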
From: Michael P. <mic...@us...> - 2011-05-25 08:19:25
|
Project "Postgres-XC". The branch, PGXC-TrialMaster has been updated via 47e7f5141b5af7e17a3838d10a1758c945d1fd67 (commit) from c51655e2648efaaef0b9e73a11b3af56aef0c031 (commit) - Log ----------------------------------------------------------------- commit 47e7f5141b5af7e17a3838d10a1758c945d1fd67 Author: Michael P <mic...@us...> Date: Wed May 25 17:16:12 2011 +0900 Addition of GIT files to XC branch gitignore files and README for git were missing. diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..988ef11 --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +# Global excludes across all subdirectories +*.o +*.so +*.so.[0-9] +*.so.[0-9].[0-9] +*.sl +*.sl.[0-9] +*.sl.[0-9].[0-9] +*.dylib +*.dll +*.a +*.mo +objfiles.txt +.deps/ + +# Local excludes in root directory +/GNUmakefile +/config.log +/config.status +*.orig +*.BACKUP.* +*.LOCAL.* +*.REMOTE.* + diff --git a/README.git b/README.git new file mode 100644 index 0000000..83b9a8c --- /dev/null +++ b/README.git @@ -0,0 +1,14 @@ +(This file does not appear in release tarballs.) + +In a release or snapshot tarball of PostgreSQL, documentation files named +INSTALL and HISTORY will appear in this directory. However, these files are +not stored in git and so will not be present if you are using a git checkout. +If you are using git, you can view the most recent install instructions at: + http://developer.postgresql.org/docs/postgres/installation.html +and the current release notes at: + http://developer.postgresql.org/docs/postgres/release.html + +Users compiling from git will also need compatible versions of Bison, Flex, +and Perl, as discussed in the install documentation. These programs are not +needed when using a tarball, since the files they are needed to build are +already present in the tarball. (On Windows, however, you need Perl anyway.) 
diff --git a/contrib/adminpack/.gitignore b/contrib/adminpack/.gitignore new file mode 100644 index 0000000..ea9a442 --- /dev/null +++ b/contrib/adminpack/.gitignore @@ -0,0 +1 @@ +/adminpack.sql diff --git a/contrib/btree_gin/.gitignore b/contrib/btree_gin/.gitignore new file mode 100644 index 0000000..7cebcf0 --- /dev/null +++ b/contrib/btree_gin/.gitignore @@ -0,0 +1,3 @@ +/btree_gin.sql +# Generated subdirectories +/results/ diff --git a/contrib/btree_gist/.gitignore b/contrib/btree_gist/.gitignore new file mode 100644 index 0000000..46318ea --- /dev/null +++ b/contrib/btree_gist/.gitignore @@ -0,0 +1,3 @@ +/btree_gist.sql +# Generated subdirectories +/results/ diff --git a/contrib/chkpass/.gitignore b/contrib/chkpass/.gitignore new file mode 100644 index 0000000..9029d66 --- /dev/null +++ b/contrib/chkpass/.gitignore @@ -0,0 +1 @@ +/chkpass.sql diff --git a/contrib/citext/.gitignore b/contrib/citext/.gitignore new file mode 100644 index 0000000..e626817 --- /dev/null +++ b/contrib/citext/.gitignore @@ -0,0 +1,3 @@ +/citext.sql +# Generated subdirectories +/results/ diff --git a/contrib/cube/.gitignore b/contrib/cube/.gitignore new file mode 100644 index 0000000..9f60da5 --- /dev/null +++ b/contrib/cube/.gitignore @@ -0,0 +1,5 @@ +/cubeparse.c +/cubescan.c +/cube.sql +# Generated subdirectories +/results/ diff --git a/contrib/dblink/.gitignore b/contrib/dblink/.gitignore new file mode 100644 index 0000000..fb7e872 --- /dev/null +++ b/contrib/dblink/.gitignore @@ -0,0 +1,3 @@ +/dblink.sql +# Generated subdirectories +/results/ diff --git a/contrib/dict_int/.gitignore b/contrib/dict_int/.gitignore new file mode 100644 index 0000000..932dda6 --- /dev/null +++ b/contrib/dict_int/.gitignore @@ -0,0 +1,3 @@ +/dict_int.sql +# Generated subdirectories +/results/ diff --git a/contrib/dict_xsyn/.gitignore b/contrib/dict_xsyn/.gitignore new file mode 100644 index 0000000..0ebd61c --- /dev/null +++ b/contrib/dict_xsyn/.gitignore @@ -0,0 +1,3 @@ +/dict_xsyn.sql +# Generated subdirectories +/results/ diff --git a/contrib/earthdistance/.gitignore b/contrib/earthdistance/.gitignore new file mode 100644 index 0000000..366a0a3 --- /dev/null +++ b/contrib/earthdistance/.gitignore @@ -0,0 +1,3 @@ +/earthdistance.sql +# Generated subdirectories +/results/ diff --git a/contrib/fuzzystrmatch/.gitignore b/contrib/fuzzystrmatch/.gitignore new file mode 100644 index 0000000..f4962c6 --- /dev/null +++ b/contrib/fuzzystrmatch/.gitignore @@ -0,0 +1 @@ +/fuzzystrmatch.sql diff --git a/contrib/hstore/.gitignore b/contrib/hstore/.gitignore new file mode 100644 index 0000000..d7af953 --- /dev/null +++ b/contrib/hstore/.gitignore @@ -0,0 +1,3 @@ +/hstore.sql +# Generated subdirectories +/results/ diff --git a/contrib/intarray/.gitignore b/contrib/intarray/.gitignore new file mode 100644 index 0000000..761a9b2 --- /dev/null +++ b/contrib/intarray/.gitignore @@ -0,0 +1,3 @@ +/_int.sql +# Generated subdirectories +/results/ diff --git a/contrib/isn/.gitignore b/contrib/isn/.gitignore new file mode 100644 index 0000000..1df12e3 --- /dev/null +++ b/contrib/isn/.gitignore @@ -0,0 +1 @@ +/isn.sql diff --git a/contrib/lo/.gitignore b/contrib/lo/.gitignore new file mode 100644 index 0000000..979347b --- /dev/null +++ b/contrib/lo/.gitignore @@ -0,0 +1 @@ +/lo.sql diff --git a/contrib/ltree/.gitignore b/contrib/ltree/.gitignore new file mode 100644 index 0000000..49883e8 --- /dev/null +++ b/contrib/ltree/.gitignore @@ -0,0 +1,3 @@ +/ltree.sql +# Generated subdirectories +/results/ diff --git a/contrib/oid2name/.gitignore 
b/contrib/oid2name/.gitignore new file mode 100644 index 0000000..fdefde1 --- /dev/null +++ b/contrib/oid2name/.gitignore @@ -0,0 +1 @@ +/oid2name diff --git a/contrib/pageinspect/.gitignore b/contrib/pageinspect/.gitignore new file mode 100644 index 0000000..fad166a --- /dev/null +++ b/contrib/pageinspect/.gitignore @@ -0,0 +1 @@ +/pageinspect.sql diff --git a/contrib/pg_archivecleanup/.gitignore b/contrib/pg_archivecleanup/.gitignore new file mode 100644 index 0000000..8040890 --- /dev/null +++ b/contrib/pg_archivecleanup/.gitignore @@ -0,0 +1 @@ +/pg_archivecleanup diff --git a/contrib/pg_buffercache/.gitignore b/contrib/pg_buffercache/.gitignore new file mode 100644 index 0000000..fea8b0b --- /dev/null +++ b/contrib/pg_buffercache/.gitignore @@ -0,0 +1 @@ +/pg_buffercache.sql diff --git a/contrib/pg_freespacemap/.gitignore b/contrib/pg_freespacemap/.gitignore new file mode 100644 index 0000000..645433a --- /dev/null +++ b/contrib/pg_freespacemap/.gitignore @@ -0,0 +1 @@ +/pg_freespacemap.sql diff --git a/contrib/pg_standby/.gitignore b/contrib/pg_standby/.gitignore new file mode 100644 index 0000000..a401b08 --- /dev/null +++ b/contrib/pg_standby/.gitignore @@ -0,0 +1 @@ +/pg_standby diff --git a/contrib/pg_stat_statements/.gitignore b/contrib/pg_stat_statements/.gitignore new file mode 100644 index 0000000..2ca3f06 --- /dev/null +++ b/contrib/pg_stat_statements/.gitignore @@ -0,0 +1 @@ +/pg_stat_statements.sql diff --git a/contrib/pg_trgm/.gitignore b/contrib/pg_trgm/.gitignore new file mode 100644 index 0000000..9cda826 --- /dev/null +++ b/contrib/pg_trgm/.gitignore @@ -0,0 +1,3 @@ +/pg_trgm.sql +# Generated subdirectories +/results/ diff --git a/contrib/pg_upgrade/.gitignore b/contrib/pg_upgrade/.gitignore new file mode 100644 index 0000000..03ec737 --- /dev/null +++ b/contrib/pg_upgrade/.gitignore @@ -0,0 +1 @@ +/pg_upgrade diff --git a/contrib/pgbench/.gitignore b/contrib/pgbench/.gitignore new file mode 100644 index 0000000..489a2d6 --- /dev/null +++ b/contrib/pgbench/.gitignore @@ -0,0 +1 @@ +/pgbench diff --git a/contrib/pgcrypto/.gitignore b/contrib/pgcrypto/.gitignore new file mode 100644 index 0000000..07b24d9 --- /dev/null +++ b/contrib/pgcrypto/.gitignore @@ -0,0 +1,3 @@ +/pgcrypto.sql +# Generated subdirectories +/results/ diff --git a/contrib/pgrowlocks/.gitignore b/contrib/pgrowlocks/.gitignore new file mode 100644 index 0000000..b272928 --- /dev/null +++ b/contrib/pgrowlocks/.gitignore @@ -0,0 +1 @@ +/pgrowlocks.sql diff --git a/contrib/pgstattuple/.gitignore b/contrib/pgstattuple/.gitignore new file mode 100644 index 0000000..69b22b6 --- /dev/null +++ b/contrib/pgstattuple/.gitignore @@ -0,0 +1 @@ +/pgstattuple.sql diff --git a/contrib/seg/.gitignore b/contrib/seg/.gitignore new file mode 100644 index 0000000..a8973ff --- /dev/null +++ b/contrib/seg/.gitignore @@ -0,0 +1,5 @@ +/segparse.c +/segscan.c +/seg.sql +# Generated subdirectories +/results/ diff --git a/contrib/spi/.gitignore b/contrib/spi/.gitignore new file mode 100644 index 0000000..6c07a33 --- /dev/null +++ b/contrib/spi/.gitignore @@ -0,0 +1,5 @@ +/autoinc.sql +/insert_username.sql +/moddatetime.sql +/refint.sql +/timetravel.sql diff --git a/contrib/sslinfo/.gitignore b/contrib/sslinfo/.gitignore new file mode 100644 index 0000000..6ed45c8 --- /dev/null +++ b/contrib/sslinfo/.gitignore @@ -0,0 +1 @@ +/sslinfo.sql diff --git a/contrib/tablefunc/.gitignore b/contrib/tablefunc/.gitignore new file mode 100644 index 0000000..b286396 --- /dev/null +++ b/contrib/tablefunc/.gitignore @@ -0,0 +1,3 @@ 
+/tablefunc.sql +# Generated subdirectories +/results/ diff --git a/contrib/test_parser/.gitignore b/contrib/test_parser/.gitignore new file mode 100644 index 0000000..c07f518 --- /dev/null +++ b/contrib/test_parser/.gitignore @@ -0,0 +1,3 @@ +/test_parser.sql +# Generated subdirectories +/results/ diff --git a/contrib/tsearch2/.gitignore b/contrib/tsearch2/.gitignore new file mode 100644 index 0000000..1d34309 --- /dev/null +++ b/contrib/tsearch2/.gitignore @@ -0,0 +1,3 @@ +/tsearch2.sql +# Generated subdirectories +/results/ diff --git a/contrib/unaccent/.gitignore b/contrib/unaccent/.gitignore new file mode 100644 index 0000000..6d74a76 --- /dev/null +++ b/contrib/unaccent/.gitignore @@ -0,0 +1,3 @@ +/unaccent.sql +# Generated subdirectories +/results/ diff --git a/contrib/vacuumlo/.gitignore b/contrib/vacuumlo/.gitignore new file mode 100644 index 0000000..07f6ab4 --- /dev/null +++ b/contrib/vacuumlo/.gitignore @@ -0,0 +1 @@ +/vacuumlo diff --git a/contrib/xml2/.gitignore b/contrib/xml2/.gitignore new file mode 100644 index 0000000..5ef9dbf --- /dev/null +++ b/contrib/xml2/.gitignore @@ -0,0 +1,3 @@ +/pgxml.sql +# Generated subdirectories +/results/ diff --git a/doc/src/sgml/.gitignore b/doc/src/sgml/.gitignore new file mode 100644 index 0000000..62a38d2 --- /dev/null +++ b/doc/src/sgml/.gitignore @@ -0,0 +1,32 @@ +# Stuff shipped in tarballs +/html/ +/html-stamp +/man1/ +/man3/ +/man7/ +/man-stamp +# Other popular build targets +/HISTORY +/INSTALL +/regress_README +/postgres-US.pdf +/postgres-A4.pdf +/postgres.html +/postgres.txt +# GENERATED_SGML +/features-supported.sgml +/features-unsupported.sgml +/version.sgml +/bookindex.sgml +/HTML.index +# Assorted byproducts from building the above +/postgres.xml +/HISTORY.html +/INSTALL.html +/regress_README.html +/postgres-US.aux +/postgres-US.log +/postgres-US.out +/postgres-A4.aux +/postgres-A4.log +/postgres-A4.out diff --git a/src/.gitignore b/src/.gitignore new file mode 100644 index 0000000..a66aac0 --- /dev/null +++ b/src/.gitignore @@ -0,0 +1,3 @@ +/Makefile.global +/Makefile.port +/Makefile.custom diff --git a/src/backend/.gitignore b/src/backend/.gitignore new file mode 100644 index 0000000..566eb10 --- /dev/null +++ b/src/backend/.gitignore @@ -0,0 +1 @@ +/postgres diff --git a/src/backend/bootstrap/.gitignore b/src/backend/bootstrap/.gitignore new file mode 100644 index 0000000..1ffe8ca --- /dev/null +++ b/src/backend/bootstrap/.gitignore @@ -0,0 +1,2 @@ +/bootparse.c +/bootscanner.c diff --git a/src/backend/catalog/.gitignore b/src/backend/catalog/.gitignore new file mode 100644 index 0000000..557af3c --- /dev/null +++ b/src/backend/catalog/.gitignore @@ -0,0 +1,4 @@ +/postgres.bki +/postgres.description +/postgres.shdescription +/schemapg.h diff --git a/src/backend/parser/.gitignore b/src/backend/parser/.gitignore new file mode 100644 index 0000000..16ac68d --- /dev/null +++ b/src/backend/parser/.gitignore @@ -0,0 +1,3 @@ +/gram.h +/gram.c +/scan.c diff --git a/src/backend/port/.gitignore b/src/backend/port/.gitignore new file mode 100644 index 0000000..7d3ac4a --- /dev/null +++ b/src/backend/port/.gitignore @@ -0,0 +1,5 @@ +/dynloader.c +/pg_latch.c +/pg_sema.c +/pg_shmem.c +/tas.s diff --git a/src/backend/snowball/.gitignore b/src/backend/snowball/.gitignore new file mode 100644 index 0000000..ef03eb6 --- /dev/null +++ b/src/backend/snowball/.gitignore @@ -0,0 +1 @@ +/snowball_create.sql diff --git a/src/backend/utils/.gitignore b/src/backend/utils/.gitignore new file mode 100644 index 0000000..fd00851 --- /dev/null +++ 
b/src/backend/utils/.gitignore @@ -0,0 +1,3 @@ +/fmgrtab.c +/fmgroids.h +/probes.h diff --git a/src/backend/utils/mb/conversion_procs/.gitignore b/src/backend/utils/mb/conversion_procs/.gitignore new file mode 100644 index 0000000..3e30742 --- /dev/null +++ b/src/backend/utils/mb/conversion_procs/.gitignore @@ -0,0 +1 @@ +/conversion_create.sql diff --git a/src/backend/utils/misc/.gitignore b/src/backend/utils/misc/.gitignore new file mode 100644 index 0000000..495b1ae --- /dev/null +++ b/src/backend/utils/misc/.gitignore @@ -0,0 +1 @@ +/guc-file.c diff --git a/src/bin/initdb/.gitignore b/src/bin/initdb/.gitignore new file mode 100644 index 0000000..843eaf0 --- /dev/null +++ b/src/bin/initdb/.gitignore @@ -0,0 +1,4 @@ +/encnames.c +/pqsignal.c + +/initdb diff --git a/src/bin/pg_config/.gitignore b/src/bin/pg_config/.gitignore new file mode 100644 index 0000000..169bc76 --- /dev/null +++ b/src/bin/pg_config/.gitignore @@ -0,0 +1 @@ +/pg_config diff --git a/src/bin/pg_controldata/.gitignore b/src/bin/pg_controldata/.gitignore new file mode 100644 index 0000000..32ea401 --- /dev/null +++ b/src/bin/pg_controldata/.gitignore @@ -0,0 +1,3 @@ +/pg_crc.c + +/pg_controldata diff --git a/src/bin/pg_ctl/.gitignore b/src/bin/pg_ctl/.gitignore new file mode 100644 index 0000000..c90c103 --- /dev/null +++ b/src/bin/pg_ctl/.gitignore @@ -0,0 +1 @@ +/pg_ctl diff --git a/src/bin/pg_dump/.gitignore b/src/bin/pg_dump/.gitignore new file mode 100644 index 0000000..c2c8677 --- /dev/null +++ b/src/bin/pg_dump/.gitignore @@ -0,0 +1,5 @@ +/kwlookup.c + +/pg_dump +/pg_dumpall +/pg_restore diff --git a/src/bin/pg_resetxlog/.gitignore b/src/bin/pg_resetxlog/.gitignore new file mode 100644 index 0000000..5845909 --- /dev/null +++ b/src/bin/pg_resetxlog/.gitignore @@ -0,0 +1,3 @@ +/pg_crc.c + +/pg_resetxlog diff --git a/src/bin/psql/.gitignore b/src/bin/psql/.gitignore new file mode 100644 index 0000000..4fbec70 --- /dev/null +++ b/src/bin/psql/.gitignore @@ -0,0 +1,8 @@ +/psqlscan.c +/sql_help.h +/sql_help.c +/dumputils.c +/keywords.c +/kwlookup.c + +/psql diff --git a/src/bin/scripts/.gitignore b/src/bin/scripts/.gitignore new file mode 100644 index 0000000..e62f4b0 --- /dev/null +++ b/src/bin/scripts/.gitignore @@ -0,0 +1,15 @@ +/clusterdb +/createdb +/createlang +/createuser +/dropdb +/droplang +/dropuser +/reindexdb +/vacuumdb + +/dumputils.c +/keywords.c +/kwlookup.c +/mbprint.c +/print.c diff --git a/src/include/.gitignore b/src/include/.gitignore new file mode 100644 index 0000000..fa285a1 --- /dev/null +++ b/src/include/.gitignore @@ -0,0 +1,5 @@ +/stamp-h +/pg_config.h +/pg_config_os.h +/dynloader.h + diff --git a/src/include/catalog/.gitignore b/src/include/catalog/.gitignore new file mode 100644 index 0000000..650202e --- /dev/null +++ b/src/include/catalog/.gitignore @@ -0,0 +1 @@ +/schemapg.h diff --git a/src/include/parser/.gitignore b/src/include/parser/.gitignore new file mode 100644 index 0000000..19ea955 --- /dev/null +++ b/src/include/parser/.gitignore @@ -0,0 +1 @@ +/gram.h diff --git a/src/include/utils/.gitignore b/src/include/utils/.gitignore new file mode 100644 index 0000000..c7c402b --- /dev/null +++ b/src/include/utils/.gitignore @@ -0,0 +1,2 @@ +/fmgroids.h +/probes.h diff --git a/src/interfaces/ecpg/compatlib/.gitignore b/src/interfaces/ecpg/compatlib/.gitignore new file mode 100644 index 0000000..e4ba84a --- /dev/null +++ b/src/interfaces/ecpg/compatlib/.gitignore @@ -0,0 +1,4 @@ +/libecpg_compatdll.def +/libecpg_compatddll.def +/blibecpg_compatdll.def +/exports.list diff --git 
a/src/interfaces/ecpg/ecpglib/.gitignore b/src/interfaces/ecpg/ecpglib/.gitignore new file mode 100644 index 0000000..e6c60b1 --- /dev/null +++ b/src/interfaces/ecpg/ecpglib/.gitignore @@ -0,0 +1,9 @@ +/libecpgdll.def +/libecpgddll.def +/blibecpgdll.def +/exports.list + +/path.c +/pgstrcasecmp.c +/strlcpy.c +/thread.c diff --git a/src/interfaces/ecpg/include/.gitignore b/src/interfaces/ecpg/include/.gitignore new file mode 100644 index 0000000..608493d --- /dev/null +++ b/src/interfaces/ecpg/include/.gitignore @@ -0,0 +1,2 @@ +/ecpg_config.h +/stamp-h diff --git a/src/interfaces/ecpg/pgtypeslib/.gitignore b/src/interfaces/ecpg/pgtypeslib/.gitignore new file mode 100644 index 0000000..aa5bdb8 --- /dev/null +++ b/src/interfaces/ecpg/pgtypeslib/.gitignore @@ -0,0 +1,6 @@ +/libpgtypesdll.def +/libpgtypesddll.def +/blibpgtypesdll.def +/exports.list + +/pgstrcasecmp.c diff --git a/src/interfaces/ecpg/preproc/.gitignore b/src/interfaces/ecpg/preproc/.gitignore new file mode 100644 index 0000000..6fca9af --- /dev/null +++ b/src/interfaces/ecpg/preproc/.gitignore @@ -0,0 +1,7 @@ +/preproc.y +/preproc.c +/preproc.h +/pgc.c + +/ecpg +/kwlookup.c diff --git a/src/interfaces/ecpg/test/.gitignore b/src/interfaces/ecpg/test/.gitignore new file mode 100644 index 0000000..d60b197 --- /dev/null +++ b/src/interfaces/ecpg/test/.gitignore @@ -0,0 +1,5 @@ +/pg_regress +# Exclude subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/src/interfaces/ecpg/test/compat_informix/.gitignore b/src/interfaces/ecpg/test/compat_informix/.gitignore new file mode 100644 index 0000000..f97706b --- /dev/null +++ b/src/interfaces/ecpg/test/compat_informix/.gitignore @@ -0,0 +1,18 @@ +/charfuncs +/charfuncs.c +/dec_test +/dec_test.c +/describe +/describe.c +/rfmtdate +/rfmtdate.c +/rfmtlong +/rfmtlong.c +/rnull +/rnull.c +/sqlda +/sqlda.c +/test_informix +/test_informix.c +/test_informix2 +/test_informix2.c diff --git a/src/interfaces/ecpg/test/connect/.gitignore b/src/interfaces/ecpg/test/connect/.gitignore new file mode 100644 index 0000000..e0639f3 --- /dev/null +++ b/src/interfaces/ecpg/test/connect/.gitignore @@ -0,0 +1,10 @@ +/test1 +/test1.c +/test2 +/test2.c +/test3 +/test3.c +/test4 +/test4.c +/test5 +/test5.c diff --git a/src/interfaces/ecpg/test/pgtypeslib/.gitignore b/src/interfaces/ecpg/test/pgtypeslib/.gitignore new file mode 100644 index 0000000..2987fef --- /dev/null +++ b/src/interfaces/ecpg/test/pgtypeslib/.gitignore @@ -0,0 +1,10 @@ +/dt_test +/dt_test.c +/dt_test2 +/dt_test2.c +/nan_test +/nan_test.c +/num_test +/num_test.c +/num_test2 +/num_test2.c diff --git a/src/interfaces/ecpg/test/preproc/.gitignore b/src/interfaces/ecpg/test/preproc/.gitignore new file mode 100644 index 0000000..f2b304b --- /dev/null +++ b/src/interfaces/ecpg/test/preproc/.gitignore @@ -0,0 +1,22 @@ +/array_of_struct +/array_of_struct.c +/autoprep +/autoprep.c +/comment +/comment.c +/cursor +/cursor.c +/define +/define.c +/init +/init.c +/outofscope +/outofscope.c +/strings +/strings.c +/type +/type.c +/variable +/variable.c +/whenever +/whenever.c diff --git a/src/interfaces/ecpg/test/sql/.gitignore b/src/interfaces/ecpg/test/sql/.gitignore new file mode 100644 index 0000000..cd6f342 --- /dev/null +++ b/src/interfaces/ecpg/test/sql/.gitignore @@ -0,0 +1,40 @@ +/array +/array.c +/binary +/binary.c +/code100 +/code100.c +/copystdout +/copystdout.c +/define +/define.c +/desc +/desc.c +/describe +/describe.c +/dynalloc +/dynalloc.c +/dynalloc2 +/dynalloc2.c +/dyntest +/dyntest.c +/execute +/execute.c +/fetch +/fetch.c +/func +/func.c 
+/indicators +/indicators.c +/insupd +/insupd.c +/oldexec +/oldexec.c +/parser +/parser.c +/quote +/quote.c +/show +/show.c +/sqlda +/sqlda.c diff --git a/src/interfaces/ecpg/test/thread/.gitignore b/src/interfaces/ecpg/test/thread/.gitignore new file mode 100644 index 0000000..12ff319 --- /dev/null +++ b/src/interfaces/ecpg/test/thread/.gitignore @@ -0,0 +1,10 @@ +/alloc +/alloc.c +/descriptor +/descriptor.c +/prep +/prep.c +/thread +/thread.c +/thread_implicit +/thread_implicit.c diff --git a/src/interfaces/libpq/.gitignore b/src/interfaces/libpq/.gitignore new file mode 100644 index 0000000..f086ec3 --- /dev/null +++ b/src/interfaces/libpq/.gitignore @@ -0,0 +1,17 @@ +/exports.list +/crypt.c +/getaddrinfo.c +/inet_aton.c +/noblock.c +/open.c +/pgstrcasecmp.c +/snprintf.c +/strerror.c +/strlcpy.c +/thread.c +/win32error.c +/pgsleep.c +/md5.c +/ip.c +/encnames.c +/wchar.c diff --git a/src/pl/plperl/.gitignore b/src/pl/plperl/.gitignore new file mode 100644 index 0000000..c04f42b --- /dev/null +++ b/src/pl/plperl/.gitignore @@ -0,0 +1,7 @@ +/SPI.c +/Util.c +/perlchunks.h +/plperl_opmask.h + +# Generated subdirectories +/results/ diff --git a/src/pl/plpgsql/src/.gitignore b/src/pl/plpgsql/src/.gitignore new file mode 100644 index 0000000..2eecb0f --- /dev/null +++ b/src/pl/plpgsql/src/.gitignore @@ -0,0 +1,2 @@ +/pl_gram.c +/pl_gram.h diff --git a/src/pl/plpython/.gitignore b/src/pl/plpython/.gitignore new file mode 100644 index 0000000..19b6c5b --- /dev/null +++ b/src/pl/plpython/.gitignore @@ -0,0 +1,2 @@ +# Generated subdirectories +/results/ diff --git a/src/pl/tcl/.gitignore b/src/pl/tcl/.gitignore new file mode 100644 index 0000000..19b6c5b --- /dev/null +++ b/src/pl/tcl/.gitignore @@ -0,0 +1,2 @@ +# Generated subdirectories +/results/ diff --git a/src/pl/tcl/modules/.gitignore b/src/pl/tcl/modules/.gitignore new file mode 100644 index 0000000..8958188 --- /dev/null +++ b/src/pl/tcl/modules/.gitignore @@ -0,0 +1,3 @@ +/pltcl_delmod +/pltcl_listmod +/pltcl_loadmod diff --git a/src/port/.gitignore b/src/port/.gitignore new file mode 100644 index 0000000..53a4032 --- /dev/null +++ b/src/port/.gitignore @@ -0,0 +1,3 @@ +/libpgport.a +/libpgport_srv.a +/pg_config_paths.h diff --git a/src/test/regress/.gitignore b/src/test/regress/.gitignore new file mode 100644 index 0000000..7573add --- /dev/null +++ b/src/test/regress/.gitignore @@ -0,0 +1,7 @@ +# Local binaries +/pg_regress + +# Generated subdirectories +/tmp_check/ +/results/ +/log/ diff --git a/src/test/regress/sql/.gitignore b/src/test/regress/sql/.gitignore new file mode 100644 index 0000000..0b7c2cf --- /dev/null +++ b/src/test/regress/sql/.gitignore @@ -0,0 +1,7 @@ +/constraints.sql +/copy.sql +/create_function_1.sql +/create_function_2.sql +/largeobject.sql +/misc.sql +/tablespace.sql diff --git a/src/timezone/.gitignore b/src/timezone/.gitignore new file mode 100644 index 0000000..f844c9f --- /dev/null +++ b/src/timezone/.gitignore @@ -0,0 +1 @@ +/zic ----------------------------------------------------------------------- Summary of changes: .gitignore | 24 ++++++++++++ README.git | 14 +++++++ contrib/adminpack/.gitignore | 1 + contrib/btree_gin/.gitignore | 3 + contrib/btree_gist/.gitignore | 3 + contrib/chkpass/.gitignore | 1 + contrib/citext/.gitignore | 3 + contrib/cube/.gitignore | 5 ++ contrib/dblink/.gitignore | 3 + contrib/dict_int/.gitignore | 3 + contrib/dict_xsyn/.gitignore | 3 + contrib/earthdistance/.gitignore | 3 + contrib/fuzzystrmatch/.gitignore | 1 + contrib/hstore/.gitignore | 3 + contrib/intarray/.gitignore | 
3 + contrib/isn/.gitignore | 1 + contrib/lo/.gitignore | 1 + contrib/ltree/.gitignore | 3 + contrib/oid2name/.gitignore | 1 + contrib/pageinspect/.gitignore | 1 + contrib/pg_archivecleanup/.gitignore | 1 + contrib/pg_buffercache/.gitignore | 1 + contrib/pg_freespacemap/.gitignore | 1 + contrib/pg_standby/.gitignore | 1 + contrib/pg_stat_statements/.gitignore | 1 + contrib/pg_trgm/.gitignore | 3 + contrib/pg_upgrade/.gitignore | 1 + contrib/pgbench/.gitignore | 1 + contrib/pgcrypto/.gitignore | 3 + contrib/pgrowlocks/.gitignore | 1 + contrib/pgstattuple/.gitignore | 1 + contrib/seg/.gitignore | 5 ++ contrib/spi/.gitignore | 5 ++ contrib/sslinfo/.gitignore | 1 + contrib/tablefunc/.gitignore | 3 + contrib/test_parser/.gitignore | 3 + contrib/tsearch2/.gitignore | 3 + contrib/unaccent/.gitignore | 3 + contrib/vacuumlo/.gitignore | 1 + contrib/xml2/.gitignore | 3 + doc/src/sgml/.gitignore | 32 ++++++++++++++++ src/.gitignore | 3 + src/backend/.gitignore | 1 + src/backend/bootstrap/.gitignore | 2 + src/backend/catalog/.gitignore | 4 ++ src/backend/parser/.gitignore | 3 + src/backend/port/.gitignore | 5 ++ src/backend/snowball/.gitignore | 1 + src/backend/utils/.gitignore | 3 + src/backend/utils/mb/conversion_procs/.gitignore | 1 + src/backend/utils/misc/.gitignore | 1 + src/bin/initdb/.gitignore | 4 ++ src/bin/pg_config/.gitignore | 1 + src/bin/pg_controldata/.gitignore | 3 + src/bin/pg_ctl/.gitignore | 1 + src/bin/pg_dump/.gitignore | 5 ++ src/bin/pg_resetxlog/.gitignore | 3 + src/bin/psql/.gitignore | 8 ++++ src/bin/scripts/.gitignore | 15 +++++++ src/include/.gitignore | 5 ++ src/include/catalog/.gitignore | 1 + src/include/parser/.gitignore | 1 + src/include/utils/.gitignore | 2 + src/interfaces/ecpg/compatlib/.gitignore | 4 ++ src/interfaces/ecpg/ecpglib/.gitignore | 9 ++++ src/interfaces/ecpg/include/.gitignore | 2 + src/interfaces/ecpg/pgtypeslib/.gitignore | 6 +++ src/interfaces/ecpg/preproc/.gitignore | 7 +++ src/interfaces/ecpg/test/.gitignore | 5 ++ .../ecpg/test/compat_informix/.gitignore | 18 +++++++++ src/interfaces/ecpg/test/connect/.gitignore | 10 +++++ src/interfaces/ecpg/test/pgtypeslib/.gitignore | 10 +++++ src/interfaces/ecpg/test/preproc/.gitignore | 22 +++++++++++ src/interfaces/ecpg/test/sql/.gitignore | 40 ++++++++++++++++++++ src/interfaces/ecpg/test/thread/.gitignore | 10 +++++ src/interfaces/libpq/.gitignore | 17 ++++++++ src/pl/plperl/.gitignore | 7 +++ src/pl/plpgsql/src/.gitignore | 2 + src/pl/plpython/.gitignore | 2 + src/pl/tcl/.gitignore | 2 + src/pl/tcl/modules/.gitignore | 3 + src/port/.gitignore | 3 + src/test/regress/.gitignore | 7 +++ src/test/regress/sql/.gitignore | 7 +++ src/timezone/.gitignore | 1 + 85 files changed, 421 insertions(+), 0 deletions(-) create mode 100644 .gitignore create mode 100644 README.git create mode 100644 contrib/adminpack/.gitignore create mode 100644 contrib/btree_gin/.gitignore create mode 100644 contrib/btree_gist/.gitignore create mode 100644 contrib/chkpass/.gitignore create mode 100644 contrib/citext/.gitignore create mode 100644 contrib/cube/.gitignore create mode 100644 contrib/dblink/.gitignore create mode 100644 contrib/dict_int/.gitignore create mode 100644 contrib/dict_xsyn/.gitignore create mode 100644 contrib/earthdistance/.gitignore create mode 100644 contrib/fuzzystrmatch/.gitignore create mode 100644 contrib/hstore/.gitignore create mode 100644 contrib/intarray/.gitignore create mode 100644 contrib/isn/.gitignore create mode 100644 contrib/lo/.gitignore create mode 100644 contrib/ltree/.gitignore create mode 
100644 contrib/oid2name/.gitignore create mode 100644 contrib/pageinspect/.gitignore create mode 100644 contrib/pg_archivecleanup/.gitignore create mode 100644 contrib/pg_buffercache/.gitignore create mode 100644 contrib/pg_freespacemap/.gitignore create mode 100644 contrib/pg_standby/.gitignore create mode 100644 contrib/pg_stat_statements/.gitignore create mode 100644 contrib/pg_trgm/.gitignore create mode 100644 contrib/pg_upgrade/.gitignore create mode 100644 contrib/pgbench/.gitignore create mode 100644 contrib/pgcrypto/.gitignore create mode 100644 contrib/pgrowlocks/.gitignore create mode 100644 contrib/pgstattuple/.gitignore create mode 100644 contrib/seg/.gitignore create mode 100644 contrib/spi/.gitignore create mode 100644 contrib/sslinfo/.gitignore create mode 100644 contrib/tablefunc/.gitignore create mode 100644 contrib/test_parser/.gitignore create mode 100644 contrib/tsearch2/.gitignore create mode 100644 contrib/unaccent/.gitignore create mode 100644 contrib/vacuumlo/.gitignore create mode 100644 contrib/xml2/.gitignore create mode 100644 doc/src/sgml/.gitignore create mode 100644 src/.gitignore create mode 100644 src/backend/.gitignore create mode 100644 src/backend/bootstrap/.gitignore create mode 100644 src/backend/catalog/.gitignore create mode 100644 src/backend/parser/.gitignore create mode 100644 src/backend/port/.gitignore create mode 100644 src/backend/snowball/.gitignore create mode 100644 src/backend/utils/.gitignore create mode 100644 src/backend/utils/mb/conversion_procs/.gitignore create mode 100644 src/backend/utils/misc/.gitignore create mode 100644 src/bin/initdb/.gitignore create mode 100644 src/bin/pg_config/.gitignore create mode 100644 src/bin/pg_controldata/.gitignore create mode 100644 src/bin/pg_ctl/.gitignore create mode 100644 src/bin/pg_dump/.gitignore create mode 100644 src/bin/pg_resetxlog/.gitignore create mode 100644 src/bin/psql/.gitignore create mode 100644 src/bin/scripts/.gitignore create mode 100644 src/include/.gitignore create mode 100644 src/include/catalog/.gitignore create mode 100644 src/include/parser/.gitignore create mode 100644 src/include/utils/.gitignore create mode 100644 src/interfaces/ecpg/compatlib/.gitignore create mode 100644 src/interfaces/ecpg/ecpglib/.gitignore create mode 100644 src/interfaces/ecpg/include/.gitignore create mode 100644 src/interfaces/ecpg/pgtypeslib/.gitignore create mode 100644 src/interfaces/ecpg/preproc/.gitignore create mode 100644 src/interfaces/ecpg/test/.gitignore create mode 100644 src/interfaces/ecpg/test/compat_informix/.gitignore create mode 100644 src/interfaces/ecpg/test/connect/.gitignore create mode 100644 src/interfaces/ecpg/test/pgtypeslib/.gitignore create mode 100644 src/interfaces/ecpg/test/preproc/.gitignore create mode 100644 src/interfaces/ecpg/test/sql/.gitignore create mode 100644 src/interfaces/ecpg/test/thread/.gitignore create mode 100644 src/interfaces/libpq/.gitignore create mode 100644 src/pl/plperl/.gitignore create mode 100644 src/pl/plpgsql/src/.gitignore create mode 100644 src/pl/plpython/.gitignore create mode 100644 src/pl/tcl/.gitignore create mode 100644 src/pl/tcl/modules/.gitignore create mode 100644 src/port/.gitignore create mode 100644 src/test/regress/.gitignore create mode 100644 src/test/regress/sql/.gitignore create mode 100644 src/timezone/.gitignore hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-05-25 08:12:15
|
Project "Postgres-XC". The branch, PGXC-TrialMaster has been updated via c51655e2648efaaef0b9e73a11b3af56aef0c031 (commit) from 0adc920245555f2087d6c4a022d8f0cc13d1e551 (commit) - Log ----------------------------------------------------------------- commit c51655e2648efaaef0b9e73a11b3af56aef0c031 Author: Michael P <mic...@us...> Date: Wed May 25 17:09:45 2011 +0900 Fix for regression tests cluster, dependency, rowtypes and with diff --git a/src/test/regress/expected/cluster_1.out b/src/test/regress/expected/cluster_1.out index 71eca00..30e2a96 100644 --- a/src/test/regress/expected/cluster_1.out +++ b/src/test/regress/expected/cluster_1.out @@ -380,9 +380,9 @@ LINE 1: insert into clstr_temp values (2, 'two'), (1, 'one'); ^ cluster clstr_temp using clstr_temp_pkey; ERROR: relation "clstr_temp" does not exist -select * from clstr_temp ORDER BY 1; +select * from clstr_temp; ERROR: relation "clstr_temp" does not exist -LINE 1: select * from clstr_temp ORDER BY 1; +LINE 1: select * from clstr_temp; ^ drop table clstr_temp; ERROR: table "clstr_temp" does not exist diff --git a/src/test/regress/expected/dependency_1.out b/src/test/regress/expected/dependency_1.out index 827f442..f7ca7f6 100644 --- a/src/test/regress/expected/dependency_1.out +++ b/src/test/regress/expected/dependency_1.out @@ -116,7 +116,7 @@ REASSIGN OWNED BY regression_user1 TO regression_user2; -- doesn't work: grant still exists DROP USER regression_user1; ERROR: role "regression_user1" cannot be dropped because some objects depend on it -DETAIL: privileges for table deptest1 +DETAIL: access to table deptest1 DROP OWNED BY regression_user1; DROP USER regression_user1; \set VERBOSITY terse diff --git a/src/test/regress/expected/rowtypes_1.out b/src/test/regress/expected/rowtypes_1.out index a6e532a..514adfe 100644 --- a/src/test/regress/expected/rowtypes_1.out +++ b/src/test/regress/expected/rowtypes_1.out @@ -283,41 +283,3 @@ select row(1,1.1) = any (array[ row(7,7.7), row(1,1.0), row(0,0.0) ]); f (1 row) --- --- Test case derived from bug #5716: check multiple uses of a rowtype result --- -BEGIN; -CREATE TABLE price ( - id SERIAL PRIMARY KEY, - active BOOLEAN NOT NULL, - price NUMERIC -); -ERROR: Postgres-XC does not support SERIAL yet -DETAIL: The feature is not currently supported -CREATE TYPE price_input AS ( - id INTEGER, - price NUMERIC -); -ERROR: current transaction is aborted, commands ignored until end of transaction block -CREATE TYPE price_key AS ( - id INTEGER -); -ERROR: current transaction is aborted, commands ignored until end of transaction block -CREATE FUNCTION price_key_from_table(price) RETURNS price_key AS $$ - SELECT $1.id -$$ LANGUAGE SQL; -ERROR: current transaction is aborted, commands ignored until end of transaction block -CREATE FUNCTION price_key_from_input(price_input) RETURNS price_key AS $$ - SELECT $1.id -$$ LANGUAGE SQL; -ERROR: current transaction is aborted, commands ignored until end of transaction block -insert into price values (1,false,42), (10,false,100), (11,true,17.99); -ERROR: current transaction is aborted, commands ignored until end of transaction block -UPDATE price - SET active = true, price = input_prices.price - FROM unnest(ARRAY[(10, 123.00), (11, 99.99)]::price_input[]) input_prices - WHERE price_key_from_table(price.*) = price_key_from_input(input_prices.*); -ERROR: current transaction is aborted, commands ignored until end of transaction block -select * from price; -ERROR: current transaction is aborted, commands ignored until end of transaction block -rollback; diff 
--git a/src/test/regress/sql/with.sql b/src/test/regress/sql/with.sql index c6bb359..687c035 100644 --- a/src/test/regress/sql/with.sql +++ b/src/test/regress/sql/with.sql @@ -480,11 +480,11 @@ with cte(foo) as ( select 42 ) select * from ((select foo from cte)) q; -- signaling still works properly after fixing this bug) select ( with cte(foo) as ( values(f1) ) select (select foo from cte) ) -from int4_tbl; +from int4_tbl order by 1; select ( with cte(foo) as ( values(f1) ) values((select foo from cte)) ) -from int4_tbl; +from int4_tbl order by 1; -- -- test for nested-recursive-WITH bug @@ -499,4 +499,4 @@ WITH RECURSIVE t(j) AS ( UNION ALL SELECT j+1 FROM t WHERE j < 10 ) -SELECT * FROM t; +SELECT * FROM t order by 1; ----------------------------------------------------------------------- Summary of changes: src/test/regress/expected/cluster_1.out | 4 +- src/test/regress/expected/dependency_1.out | 2 +- src/test/regress/expected/rowtypes_1.out | 38 ---------------------------- src/test/regress/sql/with.sql | 6 ++-- 4 files changed, 6 insertions(+), 44 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-05-25 07:41:53
|
Project "Postgres-XC". The branch, PGXC-TrialMaster has been updated via 0adc920245555f2087d6c4a022d8f0cc13d1e551 (commit) from 3b98a30005200738184510c9cc44a8d66c695815 (commit) - Log ----------------------------------------------------------------- commit 0adc920245555f2087d6c4a022d8f0cc13d1e551 Author: Michael P <mic...@us...> Date: Wed May 25 16:37:54 2011 +0900 Fix for regression tests delete, opr_sanity, typed_table and select diff --git a/src/test/regress/expected/delete_1.out b/src/test/regress/expected/delete_1.out index ec6d4a4..e412aa2 100644 --- a/src/test/regress/expected/delete_1.out +++ b/src/test/regress/expected/delete_1.out @@ -27,11 +27,9 @@ DELETE FROM delete_test dt WHERE delete_test.a > 25; ERROR: relation "delete_test" does not exist LINE 1: DELETE FROM delete_test dt WHERE delete_test.a > 25; ^ -SELECT * FROM delete_test; +SELECT * FROM delete_test ORDER BY id; ERROR: relation "delete_test" does not exist -LINE 1: SELECT * FROM delete_test; +LINE 1: SELECT * FROM delete_test ORDER BY id; ^ -DROP TABLE delete_test ORDER BY id; -ERROR: syntax error at or near "ORDER" -LINE 1: DROP TABLE delete_test ORDER BY id; - ^ +DROP TABLE delete_test; +ERROR: table "delete_test" does not exist diff --git a/src/test/regress/expected/opr_sanity_1.out b/src/test/regress/expected/opr_sanity_1.out index bf70944..1f06399 100644 --- a/src/test/regress/expected/opr_sanity_1.out +++ b/src/test/regress/expected/opr_sanity_1.out @@ -665,8 +665,9 @@ WHERE p.proisagg AND oid | proname ------+------------ 2335 | array_agg + 3537 | string_agg 3538 | string_agg -(2 rows) +(3 rows) -- If there is no finalfn then the output type must be the transtype. SELECT a.aggfnoid::oid, p.proname @@ -775,31 +776,6 @@ ORDER BY 1, 2; min | < | 1 (2 rows) --- Check that there are not aggregates with the same name and different --- numbers of arguments. While not technically wrong, we have a project policy --- to avoid this because it opens the door for confusion in connection with --- ORDER BY: novices frequently put the ORDER BY in the wrong place. --- See the fate of the single-argument form of string_agg() for history. --- The only aggregates that should show up here are count(x) and count(*). -SELECT p1.oid::regprocedure, p2.oid::regprocedure -FROM pg_proc AS p1, pg_proc AS p2 -WHERE p1.oid < p2.oid AND p1.proname = p2.proname AND - p1.proisagg AND p2.proisagg AND - array_dims(p1.proargtypes) != array_dims(p2.proargtypes) -ORDER BY 1; - oid | oid ---------------+--------- - count("any") | count() -(1 row) - --- For the same reason, aggregates with default arguments are no good. 
-SELECT oid, proname -FROM pg_proc AS p -WHERE proisagg AND proargdefaults IS NOT NULL; - oid | proname ------+--------- -(0 rows) - -- **************** pg_opfamily **************** -- Look for illegal values in pg_opfamily fields SELECT p1.oid diff --git a/src/test/regress/expected/typed_table_1.out b/src/test/regress/expected/typed_table_1.out index 0f6a208..b9b0557 100644 --- a/src/test/regress/expected/typed_table_1.out +++ b/src/test/regress/expected/typed_table_1.out @@ -25,18 +25,12 @@ SELECT * FROM get_all_persons(); ----+------ (0 rows) --- certain ALTER TABLE operations on typed tables are not allowed ALTER TABLE persons ADD COLUMN comment text; ERROR: cannot add column to typed table ALTER TABLE persons DROP COLUMN name; ERROR: cannot drop column from typed table ALTER TABLE persons RENAME COLUMN id TO num; ERROR: cannot rename column of typed table -ALTER TABLE persons ALTER COLUMN name TYPE varchar; -ERROR: cannot alter column type of typed table -CREATE TABLE stuff (id int); -ALTER TABLE persons INHERIT stuff; -ERROR: cannot change inheritance of typed table CREATE TABLE personsx OF person_type (myname WITH OPTIONS NOT NULL); -- error ERROR: column "myname" does not exist CREATE TABLE persons2 OF person_type ( @@ -77,4 +71,3 @@ NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table persons drop cascades to function get_all_persons() drop cascades to table persons3 -DROP TABLE stuff; diff --git a/src/test/regress/sql/select.sql b/src/test/regress/sql/select.sql index a95329d..8dd35d6 100644 --- a/src/test/regress/sql/select.sql +++ b/src/test/regress/sql/select.sql @@ -211,4 +211,4 @@ drop function sillysrf(int); -- X = X isn't a no-op, it's effectively X IS NOT NULL assuming = is strict -- (see bug #5084) select * from (values (2),(null),(1)) v(k) where k = k order by k; -select * from (values (2),(null),(1)) v(k) where k = k; +select * from (values (2),(null),(1)) v(k) where k = k order by k desc; ----------------------------------------------------------------------- Summary of changes: src/test/regress/expected/delete_1.out | 10 +++----- src/test/regress/expected/opr_sanity_1.out | 28 +------------------------- src/test/regress/expected/typed_table_1.out | 7 ------ src/test/regress/sql/select.sql | 2 +- 4 files changed, 7 insertions(+), 40 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-05-25 07:11:59
|
Project "Postgres-XC". The branch, PGXC-TrialMaster has been updated via 3b98a30005200738184510c9cc44a8d66c695815 (commit) from 3c71bd016e200f1f44e98762eeca0184b1385294 (commit) - Log ----------------------------------------------------------------- commit 3b98a30005200738184510c9cc44a8d66c695815 Author: Michael P <mic...@us...> Date: Wed May 25 16:08:33 2011 +0900 Fix for regression tests float4, float8, geometry, point ORDER BY extensions were missing after merge with postgres master branch diff --git a/src/test/regress/expected/float4.out b/src/test/regress/expected/float4.out index db26388..a4111ec 100644 --- a/src/test/regress/expected/float4.out +++ b/src/test/regress/expected/float4.out @@ -125,7 +125,7 @@ SELECT 'nan'::numeric::float4; NaN (1 row) -SELECT '' AS five, * FROM FLOAT4_TBL; +SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; five | f1 ------+------------- | -34.84 diff --git a/src/test/regress/expected/float4_1.out b/src/test/regress/expected/float4_1.out index 432d159..09ec318 100644 --- a/src/test/regress/expected/float4_1.out +++ b/src/test/regress/expected/float4_1.out @@ -125,7 +125,7 @@ SELECT 'nan'::numeric::float4; NaN (1 row) -SELECT '' AS five, * FROM FLOAT4_TBL; +SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; five | f1 ------+------------- | 1004.3 diff --git a/src/test/regress/expected/float8.out b/src/test/regress/expected/float8.out index 3dbfaa3..1f79f66 100644 --- a/src/test/regress/expected/float8.out +++ b/src/test/regress/expected/float8.out @@ -125,7 +125,7 @@ SELECT 'nan'::numeric::float8; NaN (1 row) -SELECT '' AS five, * FROM FLOAT8_TBL; +SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; five | f1 ------+---------------------- | -34.84 diff --git a/src/test/regress/expected/geometry.out b/src/test/regress/expected/geometry.out index c19b619..966c739 100644 --- a/src/test/regress/expected/geometry.out +++ b/src/test/regress/expected/geometry.out @@ -260,24 +260,24 @@ SELECT '' AS twenty, b.f1 / p.f1 AS rotation twenty | rotation --------+---------------------------------------------------------------------- | (0,-0),(-0.2,-0.2) + | (-0,0.0828402366864),(-0.201183431953,0) | (0.08,-0),(0,-0.56) | (0.0651176557644,0),(0,-0.0483449262493) - | (-0,0.0828402366864),(-0.201183431953,0) | (0.2,0),(0,0) | (-0.1,-0.1),(-0.3,-0.3) + | (-0.100591715976,0.12426035503),(-0.301775147929,0.0414201183432) | (0.12,-0.28),(0.04,-0.84) | (0.0976764836466,-0.0241724631247),(0.0325588278822,-0.072517389374) - | (-0.100591715976,0.12426035503),(-0.301775147929,0.0414201183432) | (0.3,0),(0.1,0) | (-0.25,-0.25),(-0.25,-0.35) + | (-0.251479289941,0.103550295858),(-0.322485207101,0.0739644970414) | (0.26,-0.7),(0.1,-0.82) | (0.109762715209,-0.0562379754329),(0.0813970697055,-0.0604311578117) - | (-0.251479289941,0.103550295858),(-0.322485207101,0.0739644970414) | (0.3,0.05),(0.25,0) | (-0.3,-0.3),(-0.3,-0.3) + | (-0.301775147929,0.12426035503),(-0.301775147929,0.12426035503) | (0.12,-0.84),(0.12,-0.84) | (0.0976764836466,-0.072517389374),(0.0976764836466,-0.072517389374) - | (-0.301775147929,0.12426035503),(-0.301775147929,0.12426035503) | (0.3,0),(0.3,0) (20 rows) diff --git a/src/test/regress/expected/point.out b/src/test/regress/expected/point.out index ddc0735..84da956 100644 --- a/src/test/regress/expected/point.out +++ b/src/test/regress/expected/point.out @@ -91,7 +91,7 @@ SELECT '' AS three, p.* FROM POINT_TBL p (3 rows) SELECT '' AS three, p.* FROM POINT_TBL p - WHERE not p.f1 <@ box '(0,0,100,100)'; + WHERE not p.f1 <@ box '(0,0,100,100)' ORDER BY p.f1[0], 
p.f1[1]; three | f1 -------+---------- | (-10,0) diff --git a/src/test/regress/sql/point.sql b/src/test/regress/sql/point.sql index 50d55c8..faa6b9e 100644 --- a/src/test/regress/sql/point.sql +++ b/src/test/regress/sql/point.sql @@ -48,7 +48,7 @@ SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 <@ box '(0,0,100,100)' ORDER BY p.f1[0], p.f1[1]; SELECT '' AS three, p.* FROM POINT_TBL p - WHERE box '(0,0,100,100)' @> p.f1 ORDER BY p.f1[0], p.f1[1]; + WHERE box '(0,0,100,100)' @> p.f1; SELECT '' AS three, p.* FROM POINT_TBL p WHERE not p.f1 <@ box '(0,0,100,100)'; ----------------------------------------------------------------------- Summary of changes: src/test/regress/expected/float4.out | 2 +- src/test/regress/expected/float4_1.out | 2 +- src/test/regress/expected/float8.out | 2 +- src/test/regress/expected/geometry.out | 8 ++++---- src/test/regress/expected/point.out | 2 +- src/test/regress/sql/point.sql | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) hooks/post-receive -- Postgres-XC |
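These ORDER BY additions are needed because the Coordinator gathers rows from several Datanodes, and the order in which the nodes answer is not deterministic, so an unordered SELECT can legitimately return a different row order on every run. A small stand-alone C sketch of the effect (an illustration only; the two producer threads standing in for Datanodes and all names are invented, not Postgres-XC code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t out_lock = PTHREAD_MUTEX_INITIALIZER;

/* Each "datanode" emits its rows; the interleaving is unpredictable. */
static void *datanode(void *arg)
{
    const int *rows = arg;
    int        ii;

    for (ii = 0; rows[ii] != 0; ii++)
    {
        pthread_mutex_lock(&out_lock);
        printf("row %d\n", rows[ii]);
        pthread_mutex_unlock(&out_lock);
    }
    return NULL;
}

int main(void)
{
    int       node1[] = {1, 3, 5, 0};
    int       node2[] = {2, 4, 6, 0};
    pthread_t t1, t2;

    pthread_create(&t1, NULL, datanode, node1);
    pthread_create(&t2, NULL, datanode, node2);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);

    /* Output order can differ between runs: hence the explicit ORDER BY. */
    return 0;
}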
From: Koichi S. <koi...@us...> - 2011-05-25 07:04:39
|
Project "Postgres-XC". The branch, documentation has been updated via 4b5e7dbd9ecd43597fb63c14a3f0c830d266ace2 (commit) from 1dc1b90e434dbe56438dec9dfa8887de1767d457 (commit) - Log ----------------------------------------------------------------- commit 4b5e7dbd9ecd43597fb63c14a3f0c830d266ace2 Author: Koichi Suzuki <koi...@gm...> Date: Wed May 25 16:02:02 2011 +0900 This commit updates Postgres-XC documentation on manual build and installation. Updated files are: modified: installation.sgmlin modified: pgnotice.sgmlin modified: postgres.sgmlin Please note that this commit does not include XCM, xc_watcher and pgxc_config. They will be described in server tools and HA sections. diff --git a/doc/src/sgml/installation.sgmlin b/doc/src/sgml/installation.sgmlin index a28e6b7..7c21d30 100644 --- a/doc/src/sgml/installation.sgmlin +++ b/doc/src/sgml/installation.sgmlin @@ -1,16 +1,22 @@ <!-- $PostgreSQL: pgsql/doc/src/sgml/installation.sgml,v 1.348.2.5 2010/09/09 17:19:46 tgl Exp $ --> <chapter id="installation"> + <!## PG> <title><![%standalone-include[<productname>PostgreSQL</>]]> Installation from Source Code</title> + <!## end> + <!## XC> + <title><![%standalone-include[<productname>Postgres-XC</>]]> + Installation from Source Code</title> + <!## end> <indexterm zone="installation"> <primary>installation</primary> </indexterm> - &pgnotice; <para> + <!## PG> This <![%standalone-include;[document]]> <![%standalone-ignore;[chapter]]> describes the installation of <productname>PostgreSQL</productname> using the source code @@ -19,14 +25,24 @@ <![%standalone-include;[document]]> <![%standalone-ignore;[chapter]]> and read the packager's instructions instead.) + <!## end> + <!## XC> + This <![%standalone-include;[document]]> + <![%standalone-ignore;[chapter]]> describes the installation of + <productname>Postgres-XC</productname> using the source code + distribution. (If you are installing a pre-packaged distribution, + such as an RPM or Debian package, ignore this + <![%standalone-include;[document]]> + <![%standalone-ignore;[chapter]]> + and read the packager's instructions instead.) + <!## end> </para> <sect1 id="install-short"> <title>Short Version</title> - &pgnotice; - <para> +<!## PG> <synopsis> ./configure gmake @@ -41,6 +57,25 @@ su - postgres /usr/local/pgsql/bin/createdb test /usr/local/pgsql/bin/psql test </synopsis> +<!## end> +<!## XC> +<synopsis> +./configure +gmake +su +gmake install +</synopsis> + +</para> +<para> +Because <productname>Postgres-XC</> is composed of multiple databsae +component which runs on different servers, the stop after this has to +be done in each server involved manually. For more automated +installation step, please see the section on <filename>pgxc_config</>. +For manual setup, please see the following sections. +</para> +<para> +<!## end> The long version is the rest of this <![%standalone-include;[document.]]> <![%standalone-ignore;[chapter.]]> @@ -50,10 +85,9 @@ su - postgres <sect1 id="install-requirements"> <title>Requirements</title> - &pgnotice; - <para> + <!## PG> In general, a modern Unix-compatible platform should be able to run <productname>PostgreSQL</>. The platforms that had received specific testing at the @@ -61,11 +95,24 @@ su - postgres below. In the <filename>doc</> subdirectory of the distribution there are several platform-specific <acronym>FAQ</> documents you might wish to consult if you are having trouble. 
+ <!## end> + <!## XC> + In general, a Linux platform based upon 64bit Intel CPU should be able to run + <productname>Postgres-XC</>. + At the time of release, it runs on 64bit CentOS-5.4 or later + and 64bit RHEL 5 or later. + <!## end> </para> <para> + <!## PG> The following software packages are required for building <productname>PostgreSQL</>: + <!## end> + <!## XC> + The following software packages are required for building + <productname>Postgres-XC</>: + <!## end> <itemizedlist> <listitem> @@ -116,6 +163,7 @@ su - postgres <primary>libedit</primary> </indexterm> + <!## PG> The <acronym>GNU</> <productname>Readline</> library is used by default. It allows <application>psql</application> (the PostgreSQL command line SQL interpreter) to remember each @@ -134,6 +182,27 @@ su - postgres Linux distribution, be aware that you need both the <literal>readline</> and <literal>readline-devel</> packages, if those are separate in your distribution. + <!## end> + <!## XC> + The <acronym>GNU</> <productname>Readline</> library is used by + default. It allows <application>psql</application> (the + Postgres-XC command line SQL interpreter) to remember each + command you type, and allows you to use arrow keys to recall and + edit previous commands. This is very helpful and is strongly + recommended. If you don't want to use it then you must specify + the <option>--without-readline</option> option to + <filename>configure</>. As an alternative, you can often use the + BSD-licensed <filename>libedit</filename> library, originally + developed on <productname>NetBSD</productname>. The + <filename>libedit</filename> library is + GNU <productname>Readline</productname>-compatible and is used if + <filename>libreadline</filename> is not found, or if + <option>--with-libedit-preferred</option> is used as an + option to <filename>configure</>. If you are using a package-based + Linux distribution, be aware that you need both the + <literal>readline</> and <literal>readline-devel</> packages, if + those are separate in your distribution. + <!## end> </para> </listitem> @@ -181,9 +250,16 @@ su - postgres </para> <para> + <!## PG> If you don't have the shared library but you need one, a message like this will appear during the <productname>PostgreSQL</> build to point out this fact: + <!## end> + <!## XC> + If you don't have the shared library but you need one, a message + like this will appear during the <productname>Postgres-XC</> + build to point out this fact: + <!## end> <screen> *** Cannot build PL/Perl because libperl is not a shared library. *** You might have to rebuild your Perl installation. Refer to @@ -235,12 +311,22 @@ su - postgres </para> <para> + <!## PG> If you have problems, run <productname>Python</> 2.3 or later's configure using the <literal>--enable-shared</> flag. On some operating systems you don't have to build a shared library, but you will have to convince the <productname>PostgreSQL</> build system of this. Consult the <filename>Makefile</filename> in the <filename>src/pl/plpython</filename> directory for details. + <!## end> + <!## XC> + If you have problems, run <productname>Python</> 2.3 or later's + configure using the <literal>--enable-shared</> flag. On some + operating systems you don't have to build a shared library, but + you will have to convince the <productname>Postgres-XC</> build + system of this. Consult the <filename>Makefile</filename> in + the <filename>src/pl/plpython</filename> directory for details. 
+ <!## end> </para> </listitem> @@ -355,6 +441,7 @@ su - postgres <para> + <!## PG> The <productname>PostgreSQL</> &version; sources can be obtained by anonymous FTP from <ulink url="ftp://ftp.postgresql.org/pub/source/v&version;/postgresql-&version;.tar.gz"></ulink>. @@ -370,6 +457,23 @@ su - postgres with the <productname>PostgreSQL</> sources. Change into that directory for the rest of the installation procedure. + <!## end> + <!## XC> + The <productname>Postgres-XC</> &version; sources can be obtained from + its Web site<ulink + url="http://postgres-xc.sourceforge.net/"></ulink> or development site + <ulink url="http://sourceforge.net/projects/postgres-xc/"></ulink>. + After you have obtained the file, unpack it: +<screen> +<userinput>gunzip pgxc-v&version;.tar.gz</userinput> +<userinput>tar xf pgxc-v&version;.tar</userinput> +</screen> + This will create a directory + <filename>pgxc</filename> under the current directory + with the <productname>Postgres-XC</> sources. + Change into that directory for the rest + of the installation procedure. + <!## end> </para> <para> @@ -385,8 +489,6 @@ su - postgres <indexterm zone="install-upgrading"> <primary>upgrading</primary> </indexterm> - &pgnotice; - <para> These instructions assume that your existing installation is under the @@ -394,8 +496,17 @@ su - postgres <filename>/usr/local/pgsql/data</>. Substitute your paths appropriately. </para> + <!## XC> + <para> + Because you have to run more than one <productname>Postgres-XC</> + components in more than one server and each component's data area + may be different from component to component. Please note that you + should do the following steps for to install and configure XC components. + </para> + <!## end> <para> + <!## PG> The internal data storage format typically changes in every major release of <productname>PostgreSQL</>. Therefore, if you are upgrading an existing installation that does not have a version number of @@ -404,6 +515,17 @@ su - postgres <quote>&majorversion;.x</quote>, the new version can use your current data files so you should skip the backup and restore steps below because they are unnecessary. + <!## end> + <!## XC> + The internal data storage format typically changes in every major + release of <productname>Postgres-XC</>. Therefore, if you are upgrading + an existing installation that does not have a version number of + <quote>&majorversion;.x</quote>, you must back up and restore your + data. If you are upgrading from <productname>Postgres-XC</> + <quote>&majorversion;.x</quote>, the new version can use your current + data files so you should skip the backup and restore steps below because + they are unnecessary. + <!## end> </para> <procedure> @@ -426,12 +548,21 @@ su - postgres <screen> <userinput>pg_dumpall > <replaceable>outputfile</></userinput> </screen> + <!## PG> If you need to preserve OIDs (such as when using them as foreign keys), then use the <option>-o</option> option when running <application>pg_dumpall</>. + <!## end> + <!## XC> + In this case, <application>pg_dumpall</> will take care of your data + stored in all the components. You can run <application>pg_dampall</> + from a client of your choince only onece. You don't have to run it + for each component. + <!## end> </para> <para> + <!## PG> To make the backup, you can use the <application>pg_dumpall</application> command from the version you are currently running. 
<para> + <!## PG> To make the backup, you can use the <application>pg_dumpall</application> command from the version you are currently running. For best results, however, try to use the <application>pg_dumpall</application> @@ -443,6 +574,21 @@ su - postgres old version. In that case you can complete the installation normally and transfer the data later. This will also decrease the downtime. + <!## end> + <!## XC> + To make the backup, you can use the <application>pg_dumpall</application> + command from the version you are currently running. For best + results, however, try to use the <application>pg_dumpall</application> + command from <productname>Postgres-XC</productname> &version;, + since this version contains bug fixes and improvements over older + versions. While this advice might seem idiosyncratic since you + haven't installed the new version yet, it is advisable to follow + it if you plan to install the new version in parallel with the + old version. In that case you can complete the installation + normally and transfer the data later. This will also decrease + the downtime. + <!## end> + </para> </step> <step> <para> Shut down the old server: <screen> <userinput>pg_ctl stop</> +<userinput>gtm_ctl stop</> </screen> + <!## XC> + Also, be careful to shut down all the running components. + <!## end> + <!## PG> On systems that have <productname>PostgreSQL</> started at boot time, there is probably a start-up file that will accomplish the same thing. For example, on a <systemitem class="osname">Red Hat Linux</> system one @@ -459,6 +610,7 @@ su - postgres <screen> <userinput>/etc/rc.d/init.d/postgresql stop</userinput> </screen> + <!## end> </para> </step> <step> @@ -477,12 +629,19 @@ su - postgres <step> <para> + <!## PG> Install the new version of <productname>PostgreSQL</productname> as outlined in <![%standalone-include[the next section.]]> <![%standalone-ignore[<xref linkend="install-procedure">.]]> + <!## end> + <!## XC> + Install the new version of <productname>Postgres-XC</productname> as + outlined in <![%standalone-include[the next section.]]> + <![%standalone-ignore[<xref linkend="install-procedure">.]]> + <!## end> </para> - </step> + </step> <step> <para> Create a new database cluster if needed. Remember that you must @@ -503,6 +662,7 @@ su - postgres <step> <para> + <!## PG> Start the database server, again using the special database user account: <programlisting> @@ -511,6 +671,19 @@ su - postgres </para> </step> +<programlisting> +<userinput>/usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data</> +</programlisting> + <!## end> + <!## XC> + Start the <productname>Postgres-XC</> cluster. This involves + configuring each component and starting each of them. This will + be covered later and is not described here. + <!## end> + </para> + + </step> + <step> <para> Finally, restore your data from backup with: @@ -745,6 +918,7 @@ su - postgres <note> <para> + <!## PG> Care has been taken to make it possible to install <productname>PostgreSQL</> into shared installation locations (such as <filename>/usr/local/include</filename>) without @@ -767,6 +941,31 @@ su - postgres access its header files. Finally, a private subdirectory will also be created, if appropriate, under <varname>libdir</varname> for dynamically loadable modules. + <!## end> + <!## XC> + Care has been taken to make it possible to install + <productname>Postgres-XC</> into shared installation locations + (such as <filename>/usr/local/include</filename>) without + interfering with the namespace of the rest of the system.
First, + the string <quote><literal>/postgresql</literal></quote> is + automatically appended to <varname>datadir</varname>, + <varname>sysconfdir</varname>, and <varname>docdir</varname>, + unless the fully expanded directory name already contains the + string <quote><literal>postgres</></quote> or + <quote><literal>pgsql</></quote>. For example, if you choose + <filename>/usr/local</filename> as prefix, the documentation will + be installed in <filename>/usr/local/doc/postgresql</filename>, + but if the prefix is <filename>/opt/postgres</filename>, then it + will be in <filename>/opt/postgres/doc</filename>. The public C + header files of the client interfaces are installed into + <varname>includedir</varname> and are namespace-clean. The + internal header files and the server header files are installed + into private directories under <varname>includedir</varname>. See + the documentation of each interface for information about how to + access its header files. Finally, a private subdirectory will + also be created, if appropriate, under <varname>libdir</varname> + for dynamically loadable modules. + <!## end> </para> </note> </para> @@ -1217,6 +1416,7 @@ su - postgres </indexterm> <listitem> <para> + <!## PG> <productname>PostgreSQL</> includes its own time zone database, which it requires for date and time operations. This time zone database is in fact compatible with the <quote>zoneinfo</> time zone @@ -1232,6 +1432,24 @@ su - postgres zone data. If you use this option, you are advised to run the regression tests to verify that the time zone data you have pointed to works correctly with <productname>PostgreSQL</>. + <!## end> + <!## XC> + <productname>Postgres-XC</> includes its own time zone database, + which it requires for date and time operations. This time zone + database is in fact compatible with the <quote>zoneinfo</> time zone + database provided by many operating systems such as FreeBSD, + Linux, and Solaris, so it would be redundant to install it again. + When this option is used, the system-supplied time zone database + in <replaceable>DIRECTORY</replaceable> is used instead of the one + included in the Postgres-XC source distribution. + <replaceable>DIRECTORY</replaceable> must be specified as an + absolute path. <filename>/usr/share/zoneinfo</filename> is a + likely directory on some operating systems. Note that the + installation routine will not detect mismatching or erroneous time + zone data. If you use this option, you are advised to run the + regression tests to verify that the time zone data you have + pointed to works correctly with <productname>Postgres-XC</>. + <!## end> </para> <para> @@ -1397,9 +1615,18 @@ su - postgres <para> You can specify environment variables on the <filename>configure</filename> command line, for example: +<!## PG> <screen> <userinput>./configure CC=/opt/bin/gcc CFLAGS='-O2 -pipe'</> </screen> +<!## end> +<!## XC> +<screen> +<userinput>./configure CC=/opt/bin/gcc CFLAGS='-O2 -pipe -DPGXC'</> +</screen> + Please note that you need to include <option>-DPGXC</> + explicitly in the <filename>CFLAGS</> option. +<!## end> </para> <para> @@ -1596,13 +1823,13 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. </para> </step> + <!## PG> <step> <title>Regression Tests</title> <indexterm> <primary>regression test</primary> </indexterm> - <para> If you want to test the newly built server before you install it, you can run the regression tests at this point.
The regression @@ -1621,6 +1848,7 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. repeat this test at any later time by issuing the same command. </para> </step> + <!## end> <step id="install"> <title>Installing the Files</title> @@ -1634,8 +1862,137 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. </para> </note> +<!## XC> <para> + Before learning how to install <productname>Postgres-XC</>, you should learn + what you are going to install on which server. The following lists the + <productname>Postgres-XC</> components you've built and are going to install. + + <variablelist> + <varlistentry> + <term><envar>GTM</envar></term> + <listitem> + <para> + GTM stands for global transaction manager. It provides a global transaction ID + and snapshot to each transaction in the <productname>Postgres-XC</> database cluster. + It also provides several global values, such as sequences and global timestamps. + </para> + <para> + GTM itself can be configured as a backup of another GTM, called + GTM-Standby, so that GTM can continue to run even if the main GTM + fails. You may want to install GTM-Standby on a separate + server. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>GTM-Proxy</envar></term> + <listitem> + <para> + Because GTM has to take care of each transaction, it has to + read and write an enormous number of messages, which may + restrict <productname>Postgres-XC</> scalability. GTM-Proxy is + a proxy of the GTM feature which groups requests and responses to + reduce the network reads/writes done by GTM. Distributing one snapshot to + multiple transactions also helps to reduce the GTM network + workload. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>Coordinator</envar></term> + <listitem> + <para> + The coordinator is the entry point to <productname>Postgres-XC</> for applications. + You can run more than one coordinator in parallel. Each coordinator behaves + just like a <productname>PostgreSQL</> database server, while all the coordinators + handle transactions in a harmonized way so that any transaction coming into one + coordinator is protected against any other transaction coming into the others. + Updates made by a transaction are immediately visible to transactions running in other + coordinators. + To simplify the load balancing of coordinators and datanodes, as mentioned + below, it is highly advisable to install the same number of coordinators and datanodes + on a server. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term><envar>Datanode</envar></term> + <listitem> + <para> + The coordinator and datanode share the same binary, but their behavior is a little + different. The coordinator decomposes incoming statements into those handled by + datanodes. If necessary, the coordinator materializes responses from datanodes + to calculate the final response to applications. + </para> + <para> + The datanode is very close to PostgreSQL itself, because it just handles incoming + statements locally. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term><envar>XCM</envar></term> + <listitem> + <para> + The <filename>XCM</> module maintains the global <productname>Postgres-XC</> + configuration and status. + XCM's status is initialized and maintained + by the <filename>xc_watcher</> module and can be referred to by any + other component. Occasionally other components may update + the <filename>XCM</> status when they detect an error; the update is passed + to <filename>xc_watcher</>, which distributes the status + change to all the other servers. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term><envar>xc_watcher</envar></term> + <listitem> + <para> + <filename>xc_watcher</> provides cluster-wide operations such + as starting, stopping, monitoring and controlling + failover/failback. + <filename>xc_watcher</> is composed of a central operation utility + and other utilities which should run on servers where at least one of GTM, + GTM-Proxy, GTM-Standby, coordinator or datanode runs. You may + want to run <filename>xc_watcher</> on a separate server. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term><envar>pgxc_config</envar></term> + <listitem> + <para> + <filename>pgxc_config</> is <productname>Postgres-XC</>'s + configuration utility. It reads <productname>XC</>'s global + configuration, helps to initialize its owner and to install + binaries into servers, generates each component's configuration + file and generates scripts used by <filename>xc_watcher</>. + </para> + <para> + <filename>pgxc_config</> simplifies <productname>XC</>'s + installation and maintenance work, which will be covered in the + server programs chapter. This section covers only + manual installation and configuration. + </para> + </listitem> + </varlistentry> + </variablelist> + An example layout is sketched just below. + </para> +<!## end>
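To make the division of labor concrete, here is a minimal sketch of one possible layout, written as shell variables; every host name and port number is hypothetical, and XCM/xc_watcher are left out for brevity:

    # Hypothetical two-server cluster: GTM runs on server1 only; each
    # server runs one GTM-Proxy, one coordinator and one datanode
    # (the same number of coordinators and datanodes per server, as
    # advised above).
    GTM_HOST=server1
    GTM_PORT=20001
    PROXY_PORT=20002              # one gtm_proxy per server
    COORD_PORTS="20004 20005"     # coordinator on server1, server2
    DATANODE_PORTS="20008 20009"  # datanode on server1, server2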
+ <para> + <!## PG> To install <productname>PostgreSQL</> enter: + <!## end> + <!## XC> + To install <productname>Postgres-XC</> locally, enter: + <!## end> <screen> <userinput>gmake install</userinput> </screen> @@ -1648,7 +2005,12 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. </para> <para> +<!## PG> To install the documentation (HTML and man pages), enter: +<!## end> +<!## XC> + To install the documentation (HTML and man pages) locally, enter: +<!## end> <screen> <userinput>gmake install-docs</userinput> </screen> @@ -1702,6 +2064,7 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. </step> </procedure> +<!## PG> <formalpara> <title>Registering <application>eventlog</> on <systemitem class="osname">Windows</>:</title> @@ -1714,6 +2077,7 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. This creates registry entries used by the event viewer. </para> </formalpara> +<!## end> <formalpara> <title>Uninstallation:</title> @@ -1763,6 +2127,7 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. <para> + <!## PG> On some systems with shared libraries you need to tell the system how to find the newly installed shared libraries. The systems on which this is @@ -1774,6 +2139,13 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install. class="osname">OpenBSD</>, <systemitem class="osname">Tru64 UNIX</> (formerly <systemitem class="osname">Digital UNIX</>), and <systemitem class="osname">Solaris</>. + <!## end> + <!## XC> + On some systems with shared libraries + you need to tell the system how to find the newly installed + shared libraries. + However, <systemitem class="osname">Linux</> does <emphasis>not</> need this.
+ <!## end> </para> <para> @@ -1826,12 +2198,20 @@ libpq.so.2.1: cannot open shared object file: No such file or directory <indexterm> <primary>ldconfig</primary> </indexterm> + <!## PG> If you are on <systemitem class="osname">BSD/OS</>, <systemitem class="osname">Linux</>, or <systemitem class="osname">SunOS 4</> and you have root access you can run: + <!## end> + <!## XC> + Because you are on <systemitem + class="osname">Linux</>, + if you have root access you can run: + <!## end> <programlisting> /sbin/ldconfig /usr/local/pgsql/lib </programlisting> + <!## PG> (or equivalent directory) after installation to enable the run-time linker to find the shared libraries faster. Refer to the manual page of <command>ldconfig</> for more information. On @@ -1843,6 +2223,7 @@ libpq.so.2.1: cannot open shared object file: No such file or directory </programlisting> instead. Other systems are not known to have an equivalent command. + <!## end> </para> </sect2> @@ -1852,8 +2233,6 @@ libpq.so.2.1: cannot open shared object file: No such file or directory <indexterm> <primary><envar>PATH</envar></primary> </indexterm> - &pgnotice; - <para> If you installed into <filename>/usr/local/pgsql</> or some other @@ -1909,17 +2288,23 @@ export MANPATH <![%standalone-include;[ <sect1 id="install-getting-started"> <title>Getting Started</title> - &pgnotice; <para> + <!## PG> The following is a quick summary of how to get <productname>PostgreSQL</> up and running once installed. The main documentation contains more information. + <!## end> + <!## XC> + The following is a quick summary of how to get <productname>Postgres-XC</> up and + running once installed. The main documentation contains more information. + <!## end> </para> <procedure> <step> <para> + <!## PG> Create a user account for the <productname>PostgreSQL</> server. This is the user the server will run as. For production use you should create a separate, unprivileged account @@ -1927,18 +2312,104 @@ export MANPATH access or just want to play around, your own user account is enough, but running the server as root is a security risk and will not work. + <!## end> + <!## XC> + Create a user account on all the servers where at least one + <productname>Postgres-XC</> component runs. This is the user + the components will run as. For production use you should create a + separate, unprivileged account (<quote>postgres</> is commonly + used). If you do not have root access or just want to play + around, your own user account is enough, but running the server + as root is a security risk and will not work. + <!## end> <screen> <userinput>adduser postgres</> </screen> </para> + +<!## XC> + + </step> + + <step> + + <para> + If you follow the previous steps, you will have files ready to + distribute to servers where you want to run one or + more <productname>Postgres-XC</> components. + </para> + <para> + After you've installed your build locally, the installation target + will include the following directories. + +<screen> +bin/ include/ lib/ share/ +</screen> + + The <filename>bin</> directory contains executable binaries and + scripts. <filename>include</> contains header files needed to + build <productname>Postgres-XC</> applications. <filename>lib</> + contains shared libraries needed to run binaries, as well as + static libraries which should be included into your application + binaries. Finally, <filename>share</> contains miscellaneous + files <productname>Postgres-XC</> should read at runtime, as well + as sample files.
+ + </para> + + <para> + + If your servers have sufficient file space, you can copy all the + files to each target server; the total size is less than 30 + megabytes. If you want to install only the minimum files on each + server, please follow the following paragraphs (a distribution + sketch is given after this step). + + </para> + + <para> + For a server to run GTM or GTM-Standby, you need to copy the + following files to your path: <filename>bin/gtm</> and <filename>bin/gtm_ctl</>. + </para> + + <para> + For a server to run GTM-Proxy (a server where you run a coordinator and/or datanode), + you need to copy the following files to your path: <filename>bin/gtm_proxy</filename> + and <filename>bin/gtm_ctl</>. + </para> + + <para> + For servers to run <filename>XCM</>, you should copy the + following files to your path: + <filename>bin/xcm_*</>. Please note that they don't require any + of the built shared libraries. You should install <filename>XCM</> on all + the servers which run + <filename>GTM</>, <filename>GTM-Standby</>, <filename>GTM-Proxy</>, + <filename>Coordinator</>, <filename>Datanode</>, + or <filename>xc_watcher</>. + </para> + + <para> + For a server to run a coordinator or datanode, or both, you should + copy the following files to your + path: <filename>bin/initdb</>, <filename>bin/pgxc_ddl</> and <filename>bin/pgxc_clean</>. + You should also copy everything in the <filename>path</> directory to + your library search path. + </para> + </step> +<!## end>
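For instance, copying the minimum GTM files to a dedicated GTM server might look like this (host name, user and install prefix are illustrative only; repeat with the file sets listed above for the other servers):

    # Copy only the GTM executables to the server that will run GTM.
    scp /usr/local/pgsql/bin/gtm /usr/local/pgsql/bin/gtm_ctl \
        postgres@server1:/usr/local/pgsql/bin/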
<step> <para> Create a database installation with the <command>initdb</> command. To run <command>initdb</> you must be logged in to your <productname>PostgreSQL</> server account. It will not work as root. +<!## XC> + Please note that you have to run <command>initdb</> for each + datanode and coordinator you are configuring. +<!## end> <screen> root# <userinput>mkdir /usr/local/pgsql/data</> root# <userinput>chown postgres /usr/local/pgsql/data</> @@ -1955,6 +2426,13 @@ postgres$ <userinput>/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data</> doesn't already exist) before starting <command>initdb</>, as illustrated here. </para> + <!## XC> + <para> + If you're configuring both a datanode and a coordinator on the same + server, you should specify a different <option>-D</> option for + each of them. + </para> + <!## end> </step> <step> @@ -1966,8 +2444,375 @@ postgres$ <userinput>/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data</> </para> </step> + <!## XC> + <step> + <para> + You should configure GTM and GTM-Proxy, as well as + GTM-Standby if you need high-availability capability for GTM, + before you really run the <productname>Postgres-XC</> database + cluster. You can do the following before you + run <command>initdb</>. + </para> + <para> + Each GTM, GTM-Proxy and GTM-Standby needs its own working directory. + Create them as the <productname>Postgres-XC</> owner user. Please + assign a port number to each of them, although you don't have to do + any other configuration work now. + </para> + </step> + + <step> + <para> + Now you should configure each coordinator and datanode. Because + they have to communicate with each other, and because the number of servers, + datanodes and coordinators depends upon the configuration, we don't + provide a default configuration file for them. + </para> + <para> + You can configure a datanode and coordinator by + editing the <filename>postgresql.conf</> file located under the + directory you specified with the <option>-D</> option + of <command>initdb</>. The following paragraphs describe what + parameters to edit, at least for coordinators; a sample coordinator + configuration is sketched after this step. You can specify + any other <filename>postgresql.conf</> parameters as in + standalone <productname>PostgreSQL</>. + </para> + + <variablelist> + <varlistentry> + <term><envar>max_prepared_transactions</envar></term> + <listitem> + <para> + <option>max_prepared_transactions</> specifies the maximum number + of two-phase commit transactions. Even if you don't use + explicit two-phase commit operations, the coordinator may issue + a two-phase commit implicitly if a transaction is + involved with multiple datanodes and/or coordinators. You should + set the <option>max_prepared_transactions</> value to at + least the value of <option>max_connections</>. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>min_pool_size</envar></term> + <listitem> + <para> + A coordinator is associated with a connection pooler which takes + care of connections to other coordinators and datanodes. This + parameter specifies the minimum number of connections to pool. + If you're not configuring the <productname>XC</> cluster in an + unbalanced way, you should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>max_pool_size</envar></term> + <listitem> + <para> + This parameter specifies the maximum number of pooled + connections. This value should be at least the total number + of coordinators and datanodes. If you specify a smaller + value, you will see very frequent connection closes and opens, + which leads to serious performance problems. + If you're not configuring the <productname>XC</> cluster in an + unbalanced way, you should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>num_data_nodes</envar></term> + <listitem> + <para> + This parameter specifies how many datanodes the coordinator + connects to. + You should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>data_node_hosts</envar></term> + <listitem> + <para> + Specify the list of IP addresses or host names of the datanodes + as a character string. + You should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>data_node_ports</envar></term> + <listitem> + <para> + Specify the list of port numbers of the datanodes + as a character string, in the order of <option>data_node_hosts</>. + You should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>data_node_users</envar></term> + <listitem> + <para> + Specify the name of the user (role) you use to connect to + datanodes. This option may be removed in future releases. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>num_coordinators</envar></term> + <listitem> + <para> + Specify how many coordinators the cluster contains. + You should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>coordinator_hosts</envar></term> + <listitem> + <para> + Specify the list of IP addresses or host names of the coordinators + as a character string. + You should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + + <varlistentry> + <term><envar>coordinator_ports</envar></term> + <listitem> + <para> + Specify the list of port numbers of the coordinators + as a character string, in the order of <option>coordinator_hosts</>. + You should specify the same value for all the + coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>coordinator_users</envar></term> + <listitem> + <para> + Specify the name of the user (role) you use to connect to + coordinators. This option may be removed in future releases. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>pgxc_node_id</envar></term> + <listitem> + <para> + Specify the index of this coordinator. The index refers to an + element of the <option>coordinator_hosts</> parameter, and begins + with 1. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>port</envar></term> + <listitem> + <para> + Specify the port number this coordinator listens on. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>pooler_port</envar></term> + <listitem> + <para> + The connection pooler needs a separate port; specify it here. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>gtm_port</envar></term> + <listitem> + <para> + Specify the port number of the GTM you're connecting to. This is + local to the server, and you should specify the port assigned to + the GTM-Proxy local to the coordinator. + </para> + </listitem> + </varlistentry> + + </variablelist> + </step>
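Putting these parameters together, a coordinator's postgresql.conf might end up with entries like the following (a minimal sketch for a hypothetical two-coordinator, two-datanode cluster; every value, and the comma-separated list syntax, is illustrative rather than authoritative):

    # Append a sample configuration for coordinator 1 of 2.
    cat >> /usr/local/pgsql/coordinator/postgresql.conf <<'EOF'
    max_prepared_transactions = 100   # at least max_connections
    min_pool_size = 1
    max_pool_size = 100               # more than coordinators + datanodes
    num_data_nodes = 2
    data_node_hosts = 'server1,server2'
    data_node_ports = '20008,20009'
    data_node_users = 'postgres'
    num_coordinators = 2
    coordinator_hosts = 'server1,server2'
    coordinator_ports = '20004,20005'
    coordinator_users = 'postgres'
    pgxc_node_id = 1                  # index into coordinator_hosts
    port = 20004
    pooler_port = 20010
    gtm_port = 20002                  # port of the local GTM-Proxy
    EOF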
+ <step> + <para> + Now you should configure <filename>postgresql.conf</> for each + datanode. Please note that, as in the case of the coordinator, you can + specify other <filename>postgresql.conf</> parameters as in + standalone <productname>PostgreSQL</>. A sample datanode + configuration is sketched after this step. + </para> + + <variablelist> + + <varlistentry> + <term><envar>max_connections</envar></term> + <listitem> + <para> + <option>max_connections</> is, in short, the maximum number of + backend processes of the datanode. You should be careful + to specify a reasonable value for this parameter, because each + coordinator backend may have connections to all the datanodes. + You should set this value to the <option>max_connections</> of + a coordinator multiplied by the number of coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>max_prepared_transactions</envar></term> + <listitem> + <para> + <option>max_prepared_transactions</> specifies the maximum number + of two-phase commit transactions. Even if you don't use + explicit two-phase commit operations, the coordinator may issue + a two-phase commit implicitly if a transaction is + involved with multiple datanodes and/or coordinators. The value + of this parameter should be at least the value + of <option>max_connections</> multiplied by the number of coordinators. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>port</envar></term> + <listitem> + <para> + Specify the port number the datanode listens on. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>pgxc_node_id</envar></term> + <listitem> + <para> + Specify the index of the datanode. The index refers to an + element of the <option>data_node_hosts</> parameter specified in + the coordinator <filename>postgresql.conf</>, and begins + with 1. + </para> + </listitem> + </varlistentry> + + <varlistentry> + <term><envar>gtm_port</envar></term> + <listitem> + <para> + Specify the port number of the GTM you're connecting to. This is + local to the server, and you should specify the port assigned to + the GTM-Proxy local to the datanode. + </para> + </listitem> + </varlistentry> + + </variablelist> + </step>
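Likewise, a datanode's postgresql.conf might contain entries like these (again only a sketch; the numbers assume two coordinators, each with max_connections = 100):

    # Append a sample configuration for datanode 1 of 2.
    cat >> /usr/local/pgsql/datanode/postgresql.conf <<'EOF'
    max_connections = 200             # coordinator max_connections x 2
    max_prepared_transactions = 200   # at least max_connections
    port = 20008
    pgxc_node_id = 1                  # index into data_node_hosts
    gtm_port = 20002                  # port of the local GTM-Proxy
    EOF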
+ <step> + <para> + Then you are ready to start the <productname>Postgres-XC</> cluster. + First, you should start GTM by something like: +<programlisting> +gtm -D /usr/local/pgsql/gtm -h localhost -p 20001 -n 1 -x 1000 +</programlisting> + This will start GTM. <option>-h</> specifies the IP address or host + name to listen on for connections + from <command>gtm_standby</>. <option>-p</> specifies the port + number to listen on. <option>-n</> specifies the node number + within GTM. This identifies the GTM, especially when you run + GTM-Standby. The <option>-x</> option specifies the value of the initial + Global Transaction ID. We need to specify this + because <command>initdb</> consumes some of the transaction IDs + locally and GTM must begin to provide global transaction IDs + greater than the consumed ones. So far, the value 1000 is believed + to be safe. + </para> + </step> + <step> + <para> + Next, you should start GTM-Proxy on each server where you're running + a coordinator and/or datanode, like: +<programlisting> +gtm_proxy -h localhost -p 20002 -s localhost -t 20001 -i 1 -n 2 -D /usr/local/pgsql/gtm_proxy +</programlisting> + This will start GTM-Proxy. The <option>-h</> option is the host name + or IP address which GTM-Proxy listens on. The <option>-p</> option + is the port number to listen on. <option>-s</> + and <option>-t</> are the IP address or host + name and the port number of the GTM as specified + above. <option>-i</> is the node number of the GTM-Proxy, beginning + with 1. <option>-n</> is the number of worker threads of the + GTM-Proxy. Usually, 1 or 2 is advised. Finally, the <option>-D</> + option is the working directory of the GTM-Proxy. + </para> + <para> + Please note that you should start GTM-Proxy on all the servers where + you run a coordinator/datanode. + </para> + </step> + + <step> + <para> + Now you can start a datanode on each server like: +<programlisting> +postgres -X -D /usr/local/pgsql/datanode +</programlisting> + This will start the datanode. <option>-X</> + tells <command>postgres</> to start as a + datanode. <option>-D</> specifies the data directory of the + datanode. You can specify other options of standalone <command>postgres</>. + </para> + <para> + Please note that you should issue the <command>postgres</> command on + all the servers where you're running datanodes. + </para> + </step> + + <step> + <para> + Finally, you can start a coordinator like: +<programlisting> +postgres -C -D /usr/local/pgsql/coordinator +</programlisting> + This will start the coordinator. <option>-C</> + tells <command>postgres</> to start as a + coordinator. <option>-D</> specifies the data directory of the + coordinator. You can specify other options of standalone <command>postgres</>. + </para> + <para> + Please note that you should issue the <command>postgres</> command on + all the servers where you're running coordinators. A consolidated + start-up sketch follows this step. + </para> + </step> + <!## end>
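Taken together, the start-up order on a single server (here the one also hosting GTM) might look like the following sketch; the directories and ports are the hypothetical ones used above, and in practice you would daemonize each process as shown in the next step:

    # Start order: GTM, then the local GTM-Proxy, then the datanode,
    # then the coordinator.
    gtm -D /usr/local/pgsql/gtm -h localhost -p 20001 -n 1 -x 1000 &
    gtm_proxy -h localhost -p 20002 -s localhost -t 20001 -i 1 -n 2 \
        -D /usr/local/pgsql/gtm_proxy &
    postgres -X -D /usr/local/pgsql/datanode &
    postgres -C -D /usr/local/pgsql/coordinator &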
+ + <step> + <!## PG> + <para> The previous <command>initdb</> step should have told you how to start up the database server. Do so now. The command should look something like: @@ -1983,12 +2828,56 @@ nohup /usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data \ </para> <para> + To stop <productname>Postgres-XC</>, you should stop + the coordinators, datanodes, GTM-Proxies and GTM. + </para> To stop a server running in the background you can type: <programlisting> kill `cat /usr/local/pgsql/data/postmaster.pid` </programlisting> </para> </step> + <!## end> + + <!## XC> + <step> + <para> + The previous step should have told you how to + start up the whole database cluster. Do so now. The command should look + something like: +<programlisting> +postgres -X -D /usr/local/pgsql/datanode +</programlisting> + This will start the datanode in the foreground. To put the datanode + in the background use something like: +<programlisting> +nohup postgres -X -D /usr/local/pgsql/datanode \ + </dev/null >>server.log 2>&1 & +</programlisting> + You can apply this to all the other components: GTM, GTM-Proxies, + and coordinators. + </para> + + <para> + To stop a datanode running in the background you can type: +<programlisting> +kill `cat /usr/local/pgsql/datanode/postmaster.pid` +</programlisting> + You can apply this to stop a coordinator too. + </para> + <para> + To stop the GTM running in the background you can type: +<programlisting> +kill `cat /usr/local/pgsql/gtm/gtm.pid` +</programlisting> + </para> + <para> + To stop a GTM-Proxy running in the background, you can type: +<programlisting> +kill `cat /usr/local/pgsql/gtm-proxy/gtm_proxy.pid` +</programlisting> + </para> + </step> + <!## end> <step> <para> @@ -1997,11 +2886,25 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` <userinput>createdb testdb</> </screen> Then enter: +<!## PG> <screen> <userinput>psql testdb</> </screen> +<!## end> +<!## XC> +<screen> +<userinput>psql -p 20004 testdb</> +</screen> +<!## end> +<!## XC> + Please do not forget to give the port number of one of the + coordinators. You are then connected to the coordinator listening + on the port you specified. +<!## end> +<!## PG> to connect to that database. At the prompt you can enter SQL commands and start experimenting. +<!## end> </para> </step> </procedure> @@ -2016,12 +2919,22 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` <itemizedlist> <listitem> <para> + <!## PG> The <productname>PostgreSQL</> distribution contains a comprehensive documentation set, which you should read sometime. After installation, the documentation can be accessed by pointing your browser to <filename>/usr/local/pgsql/doc/html/index.html</>, unless you changed the installation directories. + <!## end> + <!## XC> + The <productname>Postgres-XC</> distribution contains a + comprehensive documentation set, which you should read sometime. + After installation, the documentation can be accessed by + pointing your browser to + <filename>/usr/local/pgsql/doc/html/index.html</>, unless you + changed the installation directories. + <!## end> </para> <para> @@ -2034,6 +2947,7 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` </para> </listitem> + <!## PG> <listitem> <para> Usually, you will want to modify your computer so that it will @@ -2050,9 +2964,11 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` is also explained in the documentation. </para> </listitem> + <!## end> <listitem> <para> + <!## PG> By default, <productname>PostgreSQL</> is configured to run on minimal hardware. This allows it to start up with almost any hardware configuration. The default configuration is, however, not designed for optimum performance. To achieve optimum performance, several server parameters must be adjusted, the two most common being <varname>shared_buffers</varname> and <varname>work_mem</varname>. Other parameters mentioned in the documentation also affect performance.
+ <!## end> + <!## XC> + By default, coordinators and datanodes + of <productname>Postgres-XC</> are configured to run on + minimal hardware. This allows them to start up with almost any + hardware configuration. The default configuration is, however, + not designed for optimum performance. To achieve optimum + performance, several server parameters must be adjusted, the two + most common being <varname>shared_buffers</varname> and + <varname>work_mem</varname>. + Other parameters mentioned in the documentation also affect + performance. + <!## end> </para> </listitem> </itemizedlist> @@ -2074,7 +3003,7 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` <title>Supported Platforms</title> &pgnotice; - + <!## PG> <para> A platform (that is, a CPU architecture and operating system combination) is considered supported by the <productname>PostgreSQL</> development @@ -2088,8 +3017,10 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` or can be made to work, you are strongly encouraged to set up a build farm member machine so that continued compatibility can be assured. </para> + <!## end> <para> + <!## PG> In general, <productname>PostgreSQL</> can be expected to work on these CPU architectures: x86, x86_64, IA64, PowerPC, PowerPC 64, S/390, S/390x, Sparc, Sparc 64, Alpha, ARM, MIPS, MIPSEL, M68K, @@ -2097,9 +3028,16 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` architectures are not known to have been tested recently. It is often possible to build on an unsupported CPU type by configuring with <option>--disable-spinlocks</option>, but performance will be poor. + <!## end> + <!## XC> + In general, <productname>Postgres-XC</> can be expected to work on + the x86_64 CPU architecture. + <!## end> </para> + <para> + <!## PG> <productname>PostgreSQL</> can be expected to work on these operating systems: Linux (all recent distributions), Windows (Win2000 SP4 and later), FreeBSD, OpenBSD, NetBSD, Mac OS X, AIX, HP/UX, IRIX, Solaris, Tru64 Unix, @@ -2108,16 +3046,32 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` a given operating system will work. Look in the <xref linkend="installation-platform-notes"> below to see if there is information - specific to your operating system, particularly if using an older system. + specific to your operating system, particularly if using an older + system. + <!## end> + <!## XC> + <productname>Postgres-XC</> can be expected to work on recent + 64-bit Linux distributions. + <!## end> </para> <para> + <!## PG> If you have installation problems on a platform that is known to be supported according to recent build farm results, please report it to <email>pgs...@po...</email>. If you are interested in porting <productname>PostgreSQL</> to a new platform, <email>pgs...@po...</email> is the appropriate place to discuss that. + <!## end> + <!## XC> + If you have installation problems on a platform that is known + to be supported according to recent build farm results, please report + it to <email>pos...@li...</email>. If you are interested + in porting <productname>Postgres-XC</> to a new platform, + <email>pos...@li...</email> is the appropriate place + to discuss that. + <!## end> </para> </sect1> diff --git a/doc/src/sgml/pgnotice.sgmlin b/doc/src/sgml/pgnotice.sgmlin index 515d384..292af4d 100644 --- a/doc/src/sgml/pgnotice.sgmlin +++ b/doc/src/sgml/pgnotice.sgmlin @@ -1,6 +1,8 @@ +<!## PG> <!## XC> <para> Notice: At present, this section is just taken from PostgreSQL documentation and is subject to revision for Postgres-XC.
</para> <!## end> +<!## end> diff --git a/doc/src/sgml/postgres.sgmlin b/doc/src/sgml/postgres.sgmlin index 0e7cda1..bf95d71 100644 --- a/doc/src/sgml/postgres.sgmlin +++ b/doc/src/sgml/postgres.sgmlin @@ -172,8 +172,9 @@ <partintro> - &pgnotice; + <para> + <!## PG> This part covers topics that are of interest to a <productname>PostgreSQL</> database administrator. This includes installation of the software, set up and configuration of the @@ -181,6 +182,16 @@ Anyone who runs a <productname>PostgreSQL</> server, even for personal use, but especially in production, should be familiar with the topics covered in this part. + <!## end> + <!## XC> + This part covers topics that are of interest to a + <productname>Postgres-XC</> database administrator. This includes + installation of the software, set up and configuration of + servers, management of users and databases, and maintenance tasks. + Anyone who runs a <productname>Postgres-XC</> server, even for + personal use, but especially in production, should be familiar + with the topics covered in this part. + <!## end> </para> <para> @@ -193,6 +204,7 @@ </para> <para> + <!## PG> The first few chapters are written so they can be understood without prerequisite knowledge, so new users who need to set up their own server can begin their exploration with this part. @@ -201,6 +213,17 @@ the <productname>PostgreSQL</> database system. Readers are encouraged to look at <xref linkend="tutorial"> and <xref linkend="sql"> for additional information. + <!## end> + <!## XC> + The first few chapters are written so they can be understood + without prerequisite knowledge, so new users who need to set + up their own server can begin their exploration with this part. + The rest of this part is about tuning and management; that material + assumes that the reader is familiar with the general use of + the <productname>Postgres-XC</> database system. Readers are + encouraged to look at <xref linkend="tutorial"> and <xref + linkend="sql"> for additional information. + <!## end> </para> </partintro> ----------------------------------------------------------------------- Summary of changes: doc/src/sgml/installation.sgmlin | 982 +++++++++++++++++++++++++++++++++++++- doc/src/sgml/pgnotice.sgmlin | 2 + doc/src/sgml/postgres.sgmlin | 25 +- 3 files changed, 994 insertions(+), 15 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-05-25 06:22:20
|
Project "Postgres-XC". The branch, PGXC-TrialMaster has been updated via 3c71bd016e200f1f44e98762eeca0184b1385294 (commit) from 4a54caf28b4c2ffed56bbac7d1f27d933bb1cb2b (commit) - Log ----------------------------------------------------------------- commit 3c71bd016e200f1f44e98762eeca0184b1385294 Author: Michael P <mic...@us...> Date: Wed May 25 15:19:55 2011 +0900 Addition of a forgotton error message diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 5a87e58..4414664 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -388,10 +388,17 @@ DefineRelation(CreateStmt *stmt, char relkind) * code. This is needed because calling code might not expect untrusted * tables to appear in pg_temp at the front of its search path. */ +#ifdef PGXC + if (stmt->relation->istemp && IsUnderPostmaster) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("PG-XC does not yet support temporary tables"))); +#else if (stmt->relation->istemp && InSecurityRestrictedOperation()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("cannot create temporary table within security-restricted operation"))); +#endif /* * Look up the namespace in which we are supposed to create the relation. ----------------------------------------------------------------------- Summary of changes: src/backend/commands/tablecmds.c | 7 +++++++ 1 files changed, 7 insertions(+), 0 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-05-25 05:54:36
|
Project "Postgres-XC". The branch, PGXC-TrialMaster has been updated via 4a54caf28b4c2ffed56bbac7d1f27d933bb1cb2b (commit) from dd8e15cc5988aaa71b519724ab3d59e3e82f42e5 (commit) - Log ----------------------------------------------------------------- commit 4a54caf28b4c2ffed56bbac7d1f27d933bb1cb2b Author: Michael P <mic...@us...> Date: Wed May 25 14:51:15 2011 +0900 Correct bki generation This made impossible run of initdb. At this point cluster is able to run and to manage simple queries. diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h index 57b0b71..5c98863 100644 --- a/src/include/catalog/pg_aggregate.h +++ b/src/include/catalog/pg_aggregate.h @@ -55,19 +55,13 @@ CATALOG(pg_aggregate,2600) BKI_WITHOUT_OIDS { regproc aggfnoid; regproc aggtransfn; -#ifdef PGXC - regproc aggcollectfn; -#endif + regproc aggcollectfn; /* PGXC */ regproc aggfinalfn; Oid aggsortop; Oid aggtranstype; -#ifdef PGXC - Oid aggcollecttype; -#endif + Oid aggcollecttype; /* PGXC */ text agginitval; /* VARIABLE LENGTH FIELD */ -#ifdef PGXC - text agginitcollect; /* VARIABLE LENGTH FIELD */ -#endif + text agginitcollect; /* PGXC, VARIABLE LENGTH FIELD */ } FormData_pg_aggregate; /* ---------------- @@ -93,14 +87,15 @@ typedef FormData_pg_aggregate *Form_pg_aggregate; #define Anum_pg_aggregate_aggcollecttype 7 #define Anum_pg_aggregate_agginitval 8 #define Anum_pg_aggregate_agginitcollect 9 -#else -#define Natts_pg_aggregate 6 -#define Anum_pg_aggregate_aggfnoid 1 -#define Anum_pg_aggregate_aggtransfn 2 -#define Anum_pg_aggregate_aggfinalfn 3 -#define Anum_pg_aggregate_aggsortop 4 -#define Anum_pg_aggregate_aggtranstype 5 -#define Anum_pg_aggregate_agginitval 6 +#endif +#ifdef PGXC +//#define Natts_pg_aggregate 6 +//#define Anum_pg_aggregate_aggfnoid 1 +//#define Anum_pg_aggregate_aggtransfn 2 +//#define Anum_pg_aggregate_aggfinalfn 3 +//#define Anum_pg_aggregate_aggsortop 4 +//#define Anum_pg_aggregate_aggtranstype 5 +//#define Anum_pg_aggregate_agginitval 6 #endif @@ -118,14 +113,15 @@ DATA(insert ( 2103 numeric_avg_accum numeric_avg_collect numeric_avg 0 1231 123 DATA(insert ( 2104 float4_accum float8_collect float8_avg 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2105 float8_accum float8_collect float8_avg 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2106 interval_accum interval_collect interval_avg 0 1187 1187 "{0 second,0 second}" "{0 second,0 second}" )); -#else -DATA(insert ( 2100 int8_avg_accum numeric_avg 0 1231 "{0,0}" )); -DATA(insert ( 2101 int4_avg_accum int8_avg 0 1016 "{0,0}" )); -DATA(insert ( 2102 int2_avg_accum int8_avg 0 1016 "{0,0}" )); -DATA(insert ( 2103 numeric_avg_accum numeric_avg 0 1231 "{0,0}" )); -DATA(insert ( 2104 float4_accum float8_avg 0 1022 "{0,0,0}" )); -DATA(insert ( 2105 float8_accum float8_avg 0 1022 "{0,0,0}" )); -DATA(insert ( 2106 interval_accum interval_avg 0 1187 "{0 second,0 second}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2100 int8_avg_accum numeric_avg 0 1231 "{0,0}" )); +//DATA(insert ( 2101 int4_avg_accum int8_avg 0 1016 "{0,0}" )); +//DATA(insert ( 2102 int2_avg_accum int8_avg 0 1016 "{0,0}" )); +//DATA(insert ( 2103 numeric_avg_accum numeric_avg 0 1231 "{0,0}" )); +//DATA(insert ( 2104 float4_accum float8_avg 0 1022 "{0,0,0}" )); +//DATA(insert ( 2105 float8_accum float8_avg 0 1022 "{0,0,0}" )); +//DATA(insert ( 2106 interval_accum interval_avg 0 1187 "{0 second,0 second}" )); #endif /* sum */ @@ -138,15 +134,16 @@ DATA(insert ( 2111 float8pl float8pl - 0 701 701 _null_ "0" )); DATA(insert ( 2112 cash_pl cash_pl - 0 790 790 
_null_ _null_ )); DATA(insert ( 2113 interval_pl interval_pl - 0 1186 1186 _null_ _null_ )); DATA(insert ( 2114 numeric_add numeric_add - 0 1700 1700 _null_ "0" )); -#else -DATA(insert ( 2107 int8_sum - 0 1700 _null_ )); -DATA(insert ( 2108 int4_sum - 0 20 _null_ )); -DATA(insert ( 2109 int2_sum - 0 20 _null_ )); -DATA(insert ( 2110 float4pl - 0 700 _null_ )); -DATA(insert ( 2111 float8pl - 0 701 _null_ )); -DATA(insert ( 2112 cash_pl - 0 790 _null_ )); -DATA(insert ( 2113 interval_pl - 0 1186 _null_ )); -DATA(insert ( 2114 numeric_add - 0 1700 _null_ )); +#endif +#ifdef PGXC +//DATA(insert ( 2107 int8_sum - 0 1700 _null_ )); +//DATA(insert ( 2108 int4_sum - 0 20 _null_ )); +//DATA(insert ( 2109 int2_sum - 0 20 _null_ )); +//DATA(insert ( 2110 float4pl - 0 700 _null_ )); +//DATA(insert ( 2111 float8pl - 0 701 _null_ )); +//DATA(insert ( 2112 cash_pl - 0 790 _null_ )); +//DATA(insert ( 2113 interval_pl - 0 1186 _null_ )); +//DATA(insert ( 2114 numeric_add - 0 1700 _null_ )); #endif /* max */ @@ -171,27 +168,28 @@ DATA(insert ( 2050 array_larger array_larger - 1073 2277 2277 _null_ _null_ ) DATA(insert ( 2244 bpchar_larger bpchar_larger - 1060 1042 1042 _null_ _null_ )); DATA(insert ( 2797 tidlarger tidlarger - 2800 27 27 _null_ _null_ )); DATA(insert ( 3526 enum_larger enum_larger - 3519 3500 3500 _null_ _null_ )); -#else -DATA(insert ( 2115 int8larger - 413 20 _null_ )); -DATA(insert ( 2116 int4larger - 521 23 _null_ )); -DATA(insert ( 2117 int2larger - 520 21 _null_ )); -DATA(insert ( 2118 oidlarger - 610 26 _null_ )); -DATA(insert ( 2119 float4larger - 623 700 _null_ )); -DATA(insert ( 2120 float8larger - 674 701 _null_ )); -DATA(insert ( 2121 int4larger - 563 702 _null_ )); -DATA(insert ( 2122 date_larger - 1097 1082 _null_ )); -DATA(insert ( 2123 time_larger - 1112 1083 _null_ )); -DATA(insert ( 2124 timetz_larger - 1554 1266 _null_ )); -DATA(insert ( 2125 cashlarger - 903 790 _null_ )); -DATA(insert ( 2126 timestamp_larger - 2064 1114 _null_ )); -DATA(insert ( 2127 timestamptz_larger - 1324 1184 _null_ )); -DATA(insert ( 2128 interval_larger - 1334 1186 _null_ )); -DATA(insert ( 2129 text_larger - 666 25 _null_ )); -DATA(insert ( 2130 numeric_larger - 1756 1700 _null_ )); -DATA(insert ( 2050 array_larger - 1073 2277 _null_ )); -DATA(insert ( 2244 bpchar_larger - 1060 1042 _null_ )); -DATA(insert ( 2797 tidlarger - 2800 27 _null_ )); -DATA(insert ( 3526 enum_larger - 3519 3500 _null_ )); +#endif +#ifdef PGXC +//DATA(insert ( 2115 int8larger - 413 20 _null_ )); +//DATA(insert ( 2116 int4larger - 521 23 _null_ )); +//DATA(insert ( 2117 int2larger - 520 21 _null_ )); +//DATA(insert ( 2118 oidlarger - 610 26 _null_ )); +//DATA(insert ( 2119 float4larger - 623 700 _null_ )); +//DATA(insert ( 2120 float8larger - 674 701 _null_ )); +//DATA(insert ( 2121 int4larger - 563 702 _null_ )); +//DATA(insert ( 2122 date_larger - 1097 1082 _null_ )); +//DATA(insert ( 2123 time_larger - 1112 1083 _null_ )); +//DATA(insert ( 2124 timetz_larger - 1554 1266 _null_ )); +//DATA(insert ( 2125 cashlarger - 903 790 _null_ )); +//DATA(insert ( 2126 timestamp_larger - 2064 1114 _null_ )); +//DATA(insert ( 2127 timestamptz_larger - 1324 1184 _null_ )); +//DATA(insert ( 2128 interval_larger - 1334 1186 _null_ )); +//DATA(insert ( 2129 text_larger - 666 25 _null_ )); +//DATA(insert ( 2130 numeric_larger - 1756 1700 _null_ )); +//DATA(insert ( 2050 array_larger - 1073 2277 _null_ )); +//DATA(insert ( 2244 bpchar_larger - 1060 1042 _null_ )); +//DATA(insert ( 2797 tidlarger - 2800 27 _null_ )); +//DATA(insert ( 3526 
enum_larger - 3519 3500 _null_ )); #endif /* min */ @@ -216,27 +214,28 @@ DATA(insert ( 2051 array_smaller array_smaller - 1072 2277 2277 _null_ _null_ DATA(insert ( 2245 bpchar_smaller bpchar_smaller - 1058 1042 1042 _null_ _null_ )); DATA(insert ( 2798 tidsmaller tidsmaller - 2799 27 27 _null_ _null_ )); DATA(insert ( 3527 enum_smaller enum_smaller - 3518 3500 3500 _null_ _null_ )); -#else -DATA(insert ( 2131 int8smaller - 412 20 _null_ )); -DATA(insert ( 2132 int4smaller - 97 23 _null_ )); -DATA(insert ( 2133 int2smaller - 95 21 _null_ )); -DATA(insert ( 2134 oidsmaller - 609 26 _null_ )); -DATA(insert ( 2135 float4smaller - 622 700 _null_ )); -DATA(insert ( 2136 float8smaller - 672 701 _null_ )); -DATA(insert ( 2137 int4smaller - 562 702 _null_ )); -DATA(insert ( 2138 date_smaller - 1095 1082 _null_ )); -DATA(insert ( 2139 time_smaller - 1110 1083 _null_ )); -DATA(insert ( 2140 timetz_smaller - 1552 1266 _null_ )); -DATA(insert ( 2141 cashsmaller - 902 790 _null_ )); -DATA(insert ( 2142 timestamp_smaller - 2062 1114 _null_ )); -DATA(insert ( 2143 timestamptz_smaller - 1322 1184 _null_ )); -DATA(insert ( 2144 interval_smaller - 1332 1186 _null_ )); -DATA(insert ( 2145 text_smaller - 664 25 _null_ )); -DATA(insert ( 2146 numeric_smaller - 1754 1700 _null_ )); -DATA(insert ( 2051 array_smaller - 1072 2277 _null_ )); -DATA(insert ( 2245 bpchar_smaller - 1058 1042 _null_ )); -DATA(insert ( 2798 tidsmaller - 2799 27 _null_ )); -DATA(insert ( 3527 enum_smaller - 3518 3500 _null_ )); +#endif +#ifdef PGXC +//DATA(insert ( 2131 int8smaller - 412 20 _null_ )); +//DATA(insert ( 2132 int4smaller - 97 23 _null_ )); +//DATA(insert ( 2133 int2smaller - 95 21 _null_ )); +//DATA(insert ( 2134 oidsmaller - 609 26 _null_ )); +//DATA(insert ( 2135 float4smaller - 622 700 _null_ )); +//DATA(insert ( 2136 float8smaller - 672 701 _null_ )); +//DATA(insert ( 2137 int4smaller - 562 702 _null_ )); +//DATA(insert ( 2138 date_smaller - 1095 1082 _null_ )); +//DATA(insert ( 2139 time_smaller - 1110 1083 _null_ )); +//DATA(insert ( 2140 timetz_smaller - 1552 1266 _null_ )); +//DATA(insert ( 2141 cashsmaller - 902 790 _null_ )); +//DATA(insert ( 2142 timestamp_smaller - 2062 1114 _null_ )); +//DATA(insert ( 2143 timestamptz_smaller - 1322 1184 _null_ )); +//DATA(insert ( 2144 interval_smaller - 1332 1186 _null_ )); +//DATA(insert ( 2145 text_smaller - 664 25 _null_ )); +//DATA(insert ( 2146 numeric_smaller - 1754 1700 _null_ )); +//DATA(insert ( 2051 array_smaller - 1072 2277 _null_ )); +//DATA(insert ( 2245 bpchar_smaller - 1058 1042 _null_ )); +//DATA(insert ( 2798 tidsmaller - 2799 27 _null_ )); +//DATA(insert ( 3527 enum_smaller - 3518 3500 _null_ )); #endif /* count */ @@ -244,9 +243,10 @@ DATA(insert ( 3527 enum_smaller - 3518 3500 _null_ )); #ifdef PGXC DATA(insert ( 2147 int8inc_any int8_sum_to_int8 - 0 20 20 "0" _null_ )); DATA(insert ( 2803 int8inc int8_sum_to_int8 - 0 20 20 "0" _null_ )); -#else -DATA(insert ( 2147 int8inc_any - 0 20 "0" )); -DATA(insert ( 2803 int8inc - 0 20 "0" )); +#endif +#ifdef PGXC +//DATA(insert ( 2147 int8inc_any - 0 20 "0" )); +//DATA(insert ( 2803 int8inc - 0 20 "0" )); #endif /* var_pop */ @@ -257,13 +257,14 @@ DATA(insert ( 2720 int2_accum numeric_collect numeric_var_pop 0 1231 1231 "{0, DATA(insert ( 2721 float4_accum float8_collect float8_var_pop 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2722 float8_accum float8_collect float8_var_pop 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2723 numeric_accum numeric_collect numeric_var_pop 0 1231 1231 "{0,0,0}" "{0,0,0}" 
)); -#else -DATA(insert ( 2718 int8_accum numeric_var_pop 0 1231 "{0,0,0}" )); -DATA(insert ( 2719 int4_accum numeric_var_pop 0 1231 "{0,0,0}" )); -DATA(insert ( 2720 int2_accum numeric_var_pop 0 1231 "{0,0,0}" )); -DATA(insert ( 2721 float4_accum float8_var_pop 0 1022 "{0,0,0}" )); -DATA(insert ( 2722 float8_accum float8_var_pop 0 1022 "{0,0,0}" )); -DATA(insert ( 2723 numeric_accum numeric_var_pop 0 1231 "{0,0,0}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2718 int8_accum numeric_var_pop 0 1231 "{0,0,0}" )); +//DATA(insert ( 2719 int4_accum numeric_var_pop 0 1231 "{0,0,0}" )); +//DATA(insert ( 2720 int2_accum numeric_var_pop 0 1231 "{0,0,0}" )); +//DATA(insert ( 2721 float4_accum float8_var_pop 0 1022 "{0,0,0}" )); +//DATA(insert ( 2722 float8_accum float8_var_pop 0 1022 "{0,0,0}" )); +//DATA(insert ( 2723 numeric_accum numeric_var_pop 0 1231 "{0,0,0}" )); #endif /* var_samp */ @@ -274,13 +275,14 @@ DATA(insert ( 2643 int2_accum numeric_collect numeric_var_samp 0 1231 1231 "{0, DATA(insert ( 2644 float4_accum float8_collect float8_var_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2645 float8_accum float8_collect float8_var_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2646 numeric_accum numeric_collect numeric_var_samp 0 1231 1231 "{0,0,0}" "{0,0,0}" )); -#else -DATA(insert ( 2641 int8_accum numeric_var_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2642 int4_accum numeric_var_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2643 int2_accum numeric_var_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2644 float4_accum float8_var_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2645 float8_accum float8_var_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2646 numeric_accum numeric_var_samp 0 1231 "{0,0,0}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2641 int8_accum numeric_var_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2642 int4_accum numeric_var_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2643 int2_accum numeric_var_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2644 float4_accum float8_var_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2645 float8_accum float8_var_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2646 numeric_accum numeric_var_samp 0 1231 "{0,0,0}" )); #endif /* variance: historical Postgres syntax for var_samp */ @@ -291,13 +293,14 @@ DATA(insert ( 2150 int2_accum numeric_collect numeric_var_samp 0 1231 1231 "{0, DATA(insert ( 2151 float4_accum float8_collect float8_var_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2152 float8_accum float8_collect float8_var_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2153 numeric_accum numeric_collect numeric_var_samp 0 1231 1231 "{0,0,0}" "{0,0,0}" )); -#else -DATA(insert ( 2148 int8_accum numeric_var_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2149 int4_accum numeric_var_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2150 int2_accum numeric_var_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2151 float4_accum float8_var_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2152 float8_accum float8_var_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2153 numeric_accum numeric_var_samp 0 1231 "{0,0,0}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2148 int8_accum numeric_var_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2149 int4_accum numeric_var_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2150 int2_accum numeric_var_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2151 float4_accum float8_var_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2152 float8_accum float8_var_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2153 numeric_accum numeric_var_samp 0 1231 "{0,0,0}" )); #endif /* stddev_pop */ @@ -308,13 
+311,14 @@ DATA(insert ( 2726 int2_accum numeric_collect numeric_stddev_pop 0 1231 1231 "{ DATA(insert ( 2727 float4_accum float8_collect float8_stddev_pop 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2728 float8_accum float8_collect float8_stddev_pop 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2729 numeric_accum numeric_collect numeric_stddev_pop 0 1231 1231 "{0,0,0}" "{0,0,0}" )); -#else -DATA(insert ( 2724 int8_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); -DATA(insert ( 2725 int4_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); -DATA(insert ( 2726 int2_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); -DATA(insert ( 2727 float4_accum float8_stddev_pop 0 1022 "{0,0,0}" )); -DATA(insert ( 2728 float8_accum float8_stddev_pop 0 1022 "{0,0,0}" )); -DATA(insert ( 2729 numeric_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2724 int8_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); +//DATA(insert ( 2725 int4_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); +//DATA(insert ( 2726 int2_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); +//DATA(insert ( 2727 float4_accum float8_stddev_pop 0 1022 "{0,0,0}" )); +//DATA(insert ( 2728 float8_accum float8_stddev_pop 0 1022 "{0,0,0}" )); +//DATA(insert ( 2729 numeric_accum numeric_stddev_pop 0 1231 "{0,0,0}" )); #endif /* stddev_samp */ @@ -325,13 +329,14 @@ DATA(insert ( 2714 int2_accum numeric_collect numeric_stddev_samp 0 1231 1231 " DATA(insert ( 2715 float4_accum float8_collect float8_stddev_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2716 float8_accum float8_collect float8_stddev_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2717 numeric_accum numeric_collect numeric_stddev_samp 0 1231 1231 "{0,0,0}" "{0,0,0}" )); -#else -DATA(insert ( 2712 int8_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2713 int4_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2714 int2_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2715 float4_accum float8_stddev_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2716 float8_accum float8_stddev_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2717 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2712 int8_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2713 int4_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2714 int2_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2715 float4_accum float8_stddev_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2716 float8_accum float8_stddev_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2717 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); #endif /* stddev: historical Postgres syntax for stddev_samp */ @@ -342,13 +347,14 @@ DATA(insert ( 2156 int2_accum numeric_collect numeric_stddev_samp 0 1231 1231 " DATA(insert ( 2157 float4_accum float8_collect float8_stddev_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2158 float8_accum float8_collect float8_stddev_samp 0 1022 1022 "{0,0,0}" "{0,0,0}" )); DATA(insert ( 2159 numeric_accum numeric_collect numeric_stddev_samp 0 1231 1231 "{0,0,0}" "{0,0,0}" )); -#else -DATA(insert ( 2154 int8_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2155 int4_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2156 int2_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); -DATA(insert ( 2157 float4_accum float8_stddev_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2158 float8_accum float8_stddev_samp 0 1022 "{0,0,0}" )); -DATA(insert ( 2159 numeric_accum numeric_stddev_samp 0 1231 
"{0,0,0}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2154 int8_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2155 int4_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2156 int2_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); +//DATA(insert ( 2157 float4_accum float8_stddev_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2158 float8_accum float8_stddev_samp 0 1022 "{0,0,0}" )); +//DATA(insert ( 2159 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); #endif /* SQL2003 binary regression aggregates */ @@ -365,19 +371,20 @@ DATA(insert ( 2826 float8_regr_accum float8_regr_collect float8_regr_intercept 0 DATA(insert ( 2827 float8_regr_accum float8_regr_collect float8_covar_pop 0 1022 1022 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" )); DATA(insert ( 2828 float8_regr_accum float8_regr_collect float8_covar_samp 0 1022 1022 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" )); DATA(insert ( 2829 float8_regr_accum float8_regr_collect float8_corr 0 1022 1022 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" )); -#else -DATA(insert ( 2818 int8inc_float8_float8 - 0 20 "0" )); -DATA(insert ( 2819 float8_regr_accum float8_regr_sxx 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2820 float8_regr_accum float8_regr_syy 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2821 float8_regr_accum float8_regr_sxy 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2822 float8_regr_accum float8_regr_avgx 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2823 float8_regr_accum float8_regr_avgy 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2824 float8_regr_accum float8_regr_r2 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2825 float8_regr_accum float8_regr_slope 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2826 float8_regr_accum float8_regr_intercept 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2827 float8_regr_accum float8_covar_pop 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2828 float8_regr_accum float8_covar_samp 0 1022 "{0,0,0,0,0,0}" )); -DATA(insert ( 2829 float8_regr_accum float8_corr 0 1022 "{0,0,0,0,0,0}" )); +#endif +#ifdef PGXC +//DATA(insert ( 2818 int8inc_float8_float8 - 0 20 "0" )); +//DATA(insert ( 2819 float8_regr_accum float8_regr_sxx 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2820 float8_regr_accum float8_regr_syy 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2821 float8_regr_accum float8_regr_sxy 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2822 float8_regr_accum float8_regr_avgx 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2823 float8_regr_accum float8_regr_avgy 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2824 float8_regr_accum float8_regr_r2 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2825 float8_regr_accum float8_regr_slope 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2826 float8_regr_accum float8_regr_intercept 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2827 float8_regr_accum float8_covar_pop 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2828 float8_regr_accum float8_covar_samp 0 1022 "{0,0,0,0,0,0}" )); +//DATA(insert ( 2829 float8_regr_accum float8_corr 0 1022 "{0,0,0,0,0,0}" )); #endif /* boolean-and and boolean-or */ @@ -385,10 +392,11 @@ DATA(insert ( 2829 float8_regr_accum float8_corr 0 1022 "{0,0,0,0,0,0}" )); DATA(insert ( 2517 booland_statefunc booland_statefunc - 0 16 16 _null_ _null_ )); DATA(insert ( 2518 boolor_statefunc boolor_statefunc - 0 16 16 _null_ _null_ )); DATA(insert ( 2519 booland_statefunc booland_statefunc - 0 16 16 _null_ _null_ )); -#else -DATA(insert ( 2517 booland_statefunc - 0 16 _null_ )); -DATA(insert ( 2518 boolor_statefunc - 0 16 _null_ )); -DATA(insert ( 2519 booland_statefunc - 0 16 _null_ )); +#endif +#ifdef PGXC 
+//DATA(insert ( 2517 booland_statefunc - 0 16 _null_ )); +//DATA(insert ( 2518 boolor_statefunc - 0 16 _null_ )); +//DATA(insert ( 2519 booland_statefunc - 0 16 _null_ )); #endif /* bitwise integer */ @@ -401,35 +409,40 @@ DATA(insert ( 2240 int8and int8and - 0 20 20 _null_ _null_ )); DATA(insert ( 2241 int8or int8or - 0 20 20 _null_ _null_ )); DATA(insert ( 2242 bitand bitand - 0 1560 1560 _null_ _null_ )); DATA(insert ( 2243 bitor bitor - 0 1560 1560 _null_ _null_ )); -#else -DATA(insert ( 2236 int2and - 0 21 _null_ )); -DATA(insert ( 2237 int2or - 0 21 _null_ )); -DATA(insert ( 2238 int4and - 0 23 _null_ )); -DATA(insert ( 2239 int4or - 0 23 _null_ )); -DATA(insert ( 2240 int8and - 0 20 _null_ )); -DATA(insert ( 2241 int8or - 0 20 _null_ )); -DATA(insert ( 2242 bitand - 0 1560 _null_ )); -DATA(insert ( 2243 bitor - 0 1560 _null_ )); +#endif +#ifdef PGXC +//DATA(insert ( 2236 int2and - 0 21 _null_ )); +//DATA(insert ( 2237 int2or - 0 21 _null_ )); +//DATA(insert ( 2238 int4and - 0 23 _null_ )); +//DATA(insert ( 2239 int4or - 0 23 _null_ )); +//DATA(insert ( 2240 int8and - 0 20 _null_ )); +//DATA(insert ( 2241 int8or - 0 20 _null_ )); +//DATA(insert ( 2242 bitand - 0 1560 _null_ )); +//DATA(insert ( 2243 bitor - 0 1560 _null_ )); #endif /* xml */ #ifdef PGXC DATA(insert ( 2901 xmlconcat2 xmlconcat2 - 0 142 142 _null_ _null_ )); -#else -DATA(insert ( 2901 xmlconcat2 - 0 142 _null_ )); +#endif +#ifdef PGXC +//DATA(insert ( 2901 xmlconcat2 - 0 142 _null_ )); #endif /* array */ #ifdef PGXC /* PGXCTODO */ //DATA(insert ( 2335 array_agg_transfn array_agg_finalfn 0 2281 _null_ )); -#else -DATA(insert ( 2335 array_agg_transfn array_agg_finalfn 0 2281 _null_ )); +#endif +#ifdef PGXC +//DATA(insert ( 2335 array_agg_transfn array_agg_finalfn 0 2281 _null_ )); #endif /* text */ -DATA(insert (3537 string_agg_transfn string_agg_finalfn 0 2281 _null_ )); -DATA(insert (3538 string_agg_delim_transfn string_agg_finalfn 0 2281 _null_ )); +#ifdef PGXC +//DATA(insert (3537 string_agg_transfn string_agg_finalfn 0 2281 _null_ )); +//DATA(insert (3538 string_agg_delim_transfn string_agg_finalfn 0 2281 _null_ )); +#endif /* * prototypes for functions in pg_aggregate.c diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index 7ae0b73..6d8163e 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -2835,7 +2835,7 @@ DATA(insert OID = 2966 ( float8_collect PGNSP PGUID 12 1 0 0 f f f t f i 2 0 DESCR("aggregate collection function"); DATA(insert OID = 2964 ( numeric_avg_collect PGNSP PGUID 12 1 0 0 f f f t f i 2 0 1231 "1231 1231" _null_ _null_ _null_ _null_ numeric_avg_collect _null_ _null_ _null_ )); DESCR("aggregate collection function"); -DATA(insert OID = 2730 ( numeric_collect PGNSP PGUID 12 1 0 0 f f f t f i 2 0 1231 "1231 1231" _null_ _null_ _null_ _null_ numeric_collect _null_ _null_ _null_ )); +DATA(insert OID = 2968 ( numeric_collect PGNSP PGUID 12 1 0 0 f f f t f i 2 0 1231 "1231 1231" _null_ _null_ _null_ _null_ numeric_collect _null_ _null_ _null_ )); DESCR("aggregate collection function"); DATA(insert OID = 2967 ( interval_collect PGNSP PGUID 12 1 0 0 f f f t f i 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ interval_collect _null_ _null_ _null_ )); DESCR("aggregate transition function"); ----------------------------------------------------------------------- Summary of changes: src/include/catalog/pg_aggregate.h | 319 +++++++++++++++++++----------------- src/include/catalog/pg_proc.h | 2 +- 2 files changed, 167 insertions(+), 154 deletions(-) 
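For readers following the catalog change above: in Postgres-XC's two-phase aggregates, the transition function runs on each datanode and the collection function (numeric_collect, float8_collect, and friends, described in pg_proc as "aggregate collection function") merges the per-node transition states on the coordinator. A minimal sketch of the merge idea in plain SQL, assuming a hypothetical table measurements(node, x) where grouping by node stands in for the per-datanode partial states:

-- stddev_pop's transition state is {N, sum(x), sum(x*x)}; merging the
-- states elementwise and finishing once gives the same answer as
-- aggregating the whole table, which is what the collect step relies on.
WITH part AS (
    SELECT node, count(x) AS n, sum(x) AS s, sum(x*x) AS q
    FROM measurements        -- hypothetical table, not part of the patch
    GROUP BY node
), merged AS (
    SELECT sum(n) AS n, sum(s) AS s, sum(q) AS q FROM part
)
SELECT sqrt(q/n - (s/n)^2) AS stddev_pop_merged FROM merged;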
hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-05-25 05:00:25
|
Project "Postgres-XC". The branch, master has been updated via 4a7fcb29f0ac168c99f78bdf86078be24015ddb0 (commit) from 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) - Log ----------------------------------------------------------------- commit 4a7fcb29f0ac168c99f78bdf86078be24015ddb0 Author: Michael P <mic...@us...> Date: Wed May 25 13:56:14 2011 +0900 Support for DISCARD This solves also bug 3307003 where it was impossible to launch successive regression tests. Regression test guc is updated with correct output. diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index e4f33c5..b7bf2a3 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -1482,6 +1482,19 @@ standard_ProcessUtility(Node *parsetree, /* should we allow DISCARD PLANS? */ CheckRestrictedOperation("DISCARD"); DiscardCommand((DiscardStmt *) parsetree, isTopLevel); +#ifdef PGXC + /* Let the pooler manage the statement */ + if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) + { + /* + * If command is local and we are not in a transaction block do NOT + * send this query to backend nodes + */ + if (!IsTransactionBlock()) + if (PoolManagerSetCommand(false, queryString) < 0) + elog(ERROR, "Postgres-XC: ERROR DISCARD query"); + } +#endif break; case T_CreateTrigStmt: diff --git a/src/test/regress/expected/guc_1.out b/src/test/regress/expected/guc_1.out index 83b5b65..d71a66c 100644 --- a/src/test/regress/expected/guc_1.out +++ b/src/test/regress/expected/guc_1.out @@ -513,7 +513,6 @@ SELECT current_user = 'temp_reset_user'; (1 row) DROP ROLE temp_reset_user; -ERROR: permission denied to drop role -- -- Tests for function-local GUC settings -- @@ -521,35 +520,32 @@ set work_mem = '3MB'; create function report_guc(text) returns text as $$ select current_setting($1) $$ language sql set work_mem = '1MB'; -ERROR: stable and volatile not yet supported, function volatility has to be immutable select report_guc('work_mem'), current_setting('work_mem'); -ERROR: function report_guc(unknown) does not exist -LINE 1: select report_guc('work_mem'), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. + report_guc | current_setting +------------+----------------- + 1MB | 3MB +(1 row) + -- this should draw only a warning alter function report_guc(text) set search_path = no_such_schema; -ERROR: function report_guc(text) does not exist +NOTICE: schema "no_such_schema" does not exist -- with error occurring here select report_guc('work_mem'), current_setting('work_mem'); -ERROR: function report_guc(unknown) does not exist -LINE 1: select report_guc('work_mem'), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +ERROR: schema "no_such_schema" does not exist alter function report_guc(text) reset search_path set work_mem = '2MB'; -ERROR: function report_guc(text) does not exist select report_guc('work_mem'), current_setting('work_mem'); -ERROR: function report_guc(unknown) does not exist -LINE 1: select report_guc('work_mem'), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+ report_guc | current_setting +------------+----------------- + 2MB | 3MB +(1 row) + alter function report_guc(text) reset all; -ERROR: function report_guc(text) does not exist select report_guc('work_mem'), current_setting('work_mem'); -ERROR: function report_guc(unknown) does not exist -LINE 1: select report_guc('work_mem'), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. + report_guc | current_setting +------------+----------------- + 3MB | 3MB +(1 row) + -- SET LOCAL is restricted by a function SET option create or replace function myfunc(int) returns text as $$ begin @@ -558,19 +554,19 @@ begin end $$ language plpgsql set work_mem = '1MB'; -ERROR: stable and volatile not yet supported, function volatility has to be immutable select myfunc(0), current_setting('work_mem'); -ERROR: function myfunc(integer) does not exist -LINE 1: select myfunc(0), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. + myfunc | current_setting +--------+----------------- + 2MB | 3MB +(1 row) + alter function myfunc(int) reset all; -ERROR: function myfunc(integer) does not exist select myfunc(0), current_setting('work_mem'); -ERROR: function myfunc(integer) does not exist -LINE 1: select myfunc(0), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. + myfunc | current_setting +--------+----------------- + 2MB | 2MB +(1 row) + set work_mem = '3MB'; -- but SET isn't create or replace function myfunc(int) returns text as $$ @@ -580,12 +576,12 @@ begin end $$ language plpgsql set work_mem = '1MB'; -ERROR: stable and volatile not yet supported, function volatility has to be immutable select myfunc(0), current_setting('work_mem'); -ERROR: function myfunc(integer) does not exist -LINE 1: select myfunc(0), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. + myfunc | current_setting +--------+----------------- + 2MB | 2MB +(1 row) + set work_mem = '3MB'; -- it should roll back on error, though create or replace function myfunc(int) returns text as $$ @@ -596,12 +592,10 @@ begin end $$ language plpgsql set work_mem = '1MB'; -ERROR: stable and volatile not yet supported, function volatility has to be immutable select myfunc(0); -ERROR: function myfunc(integer) does not exist -LINE 1: select myfunc(0); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +ERROR: division by zero +CONTEXT: SQL statement "SELECT 1/$1" +PL/pgSQL function "myfunc" line 3 at PERFORM select current_setting('work_mem'); current_setting ----------------- @@ -609,7 +603,8 @@ select current_setting('work_mem'); (1 row) select myfunc(1), current_setting('work_mem'); -ERROR: function myfunc(integer) does not exist -LINE 1: select myfunc(1), current_setting('work_mem'); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. + myfunc | current_setting +--------+----------------- + 2MB | 2MB +(1 row) + ----------------------------------------------------------------------- Summary of changes: src/backend/tcop/utility.c | 13 +++++ src/test/regress/expected/guc_1.out | 85 ++++++++++++++++------------------ 2 files changed, 53 insertions(+), 45 deletions(-) hooks/post-receive -- Postgres-XC |
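To make the effect of this commit concrete, a small psql sketch (the setting is illustrative; note the patch hands DISCARD to the pooler only when outside a transaction block, and DISCARD ALL cannot run inside one anyway):

-- on a coordinator, outside any transaction block
SET work_mem = '64MB';  -- session state also held by pooled datanode connections
DISCARD ALL;            -- with this commit, forwarded through the pooler
                        -- so the datanode sessions are reset as well
SHOW work_mem;          -- back to the server default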
From: Koichi S. <ko...@in...> - 2011-05-25 01:28:10
|
I think float and float-based types, as they currently behave, are not adequate as distribution columns. On the other hand, I think it is not a good thing to exclude them by hard-coded logic. We should consider future extension here, and using pg_type or a new catalog would be a good idea. --- Koichi On Tue, 24 May 2011 09:57:54 -0400 Mason Sharp <mas...@gm...> wrote: > On Tue, May 24, 2011 at 9:40 AM, Abbas Butt <abb...@te...> wrote: > > > > > > On Tue, May 24, 2011 at 6:03 PM, Mason <ma...@us...> > > wrote: > >> > >> On Tue, May 24, 2011 at 8:08 AM, Abbas Butt > >> <ga...@us...> wrote: > >> > Project "Postgres-XC". > >> > > >> > The branch, master has been updated > >> > via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) > >> > from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) > >> > > >> > > >> > - Log ----------------------------------------------------------------- > >> > commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae > >> > Author: Abbas <abb...@en...> > >> > Date: Tue May 24 17:06:30 2011 +0500 > >> > > >> > This patch adds support for the following data types to be used as > >> > distribution key > >> > > >> > INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR > >> > CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR > >> > FLOAT4, FLOAT8, NUMERIC, CASH > >> > ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, > >> > TIMETZ > >> > > >> > >> I am not sure some of these data types are a good idea to use for > >> distributing on. Float is inexact and seems problematic > >> > >> I just did a quick test: > >> > >> mds=# create table float1 (a float, b float) distribute by hash (a); > >> CREATE TABLE > >> > >> mds=# insert into float1 values (2.0/3, 2); > >> INSERT 0 1 > >> > >> mds=# select * from float1; > >> a | b > >> -------------------+--- > >> 0.666666666666667 | 2 > >> (1 row) > >> > >> Then, I copy and paste the output of a: > >> > >> mds=# select * from float1 where a = 0.666666666666667; > >> a | b > >> ---+--- > >> (0 rows) > >> > > > > float is a tricky type. Leave XC aside this test case will produce same > > results in plain postgres for this reason. > > The column actually does not contain 0.666666666666667, what psql is showing > > us is only an approximation of what is stored there. > > select * from float1 where a = 2.0/3; would however work. > > 2ndly suppose we have the same test case with data type float4. > > Now both > > select * from float1 where a = 0.666666666666667; and > > select * from float1 where a = 2.0/3; > > would show up no results both in PG and XC. > > The reason is that PG treats real numbers as float8 by default and float8 > > does not compare to float4. > > select * from float1 where a = cast (2.0/3 as float4); > > would therefore work. > > Any user willing to use float types has to be aware of these strange > > behaviors and knowing these he/she may benefit from being able to use it as > > a distribution key. > > I don't think it is a good idea that they have to know that they > should change all of their application code and add casting to make > sure it works like they want. I think people are just going to get > themselves into trouble. I strongly recommend disabling distribution > support for some of these data types.
> > Thanks, > > Mason > > > > > > >> > >> Looking at the plan it tries to take advantage of partitioning: > >> > >> mds=# explain select * from float1 where a = 0.666666666666667; > >> QUERY PLAN > >> ------------------------------------------------------------------- > >> Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) > >> (1 row) > >> > >> I think we should remove support for floats as a possible distribution > >> type; users may get themselves into trouble. > >> > >> > >> There may be similar issues with the timestamp data types: > >> > >> mds=# create table timestamp1 (a timestamp, b int) distribute by hash(a); > >> CREATE TABLE > >> mds=# insert into timestamp1 values (now(), 1); > >> INSERT 0 1 > >> mds=# select * from timestamp1; > >> a | b > >> ----------------------------+--- > >> 2011-05-24 08:51:21.597551 | 1 > >> (1 row) > >> > >> mds=# select * from timestamp1 where a = '2011-05-24 08:51:21.597551'; > >> a | b > >> ---+--- > >> (0 rows) > >> > >> > >> As far as BOOL goes, I suppose it may be ok, but of course there are > >> only two possible values. I would block it, or at the very least if > >> the user leaves off the distribution clause, I would not consider BOOL > >> columns and look at other columns as better partitioning candidates. > >> > >> In any event, I am very glad to see the various INT types, CHAR, > >> VARCHAR, TEXT, NUMERIC and DATE supported. I am not so sure how useful > >> some of the others are. > >> > >> Thanks, > >> > >> Mason > >> > >> > >> ------------------------------------------------------------------------------ > >> vRanger cuts backup time in half-while increasing security. > >> With the market-leading solution for virtual backup and recovery, > >> you get blazing-fast, flexible, and affordable data protection. > >> Download your free trial now. > >> http://p.sf.net/sfu/quest-d2dcopy1 > >> _______________________________________________ > >> Postgres-xc-committers mailing list > >> Pos...@li... > >> https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers > > > > > > ------------------------------------------------------------------------------ > > vRanger cuts backup time in half-while increasing security. > > With the market-leading solution for virtual backup and recovery, > > you get blazing-fast, flexible, and affordable data protection. > > Download your free trial now. > > http://p.sf.net/sfu/quest-d2dcopy1 > > _______________________________________________ > > Postgres-xc-committers mailing list > > Pos...@li... > > https://lists.sourceforge.net/lists/listinfo/postgres-xc-committers > > > > > > ------------------------------------------------------------------------------ > vRanger cuts backup time in half-while increasing security. > With the market-leading solution for virtual backup and recovery, > you get blazing-fast, flexible, and affordable data protection. > Download your free trial now. > http://p.sf.net/sfu/quest-d2dcopy1 > _______________________________________________ > Postgres-xc-developers mailing list > Pos...@li... > https://lists.sourceforge.net/lists/listinfo/postgres-xc-developers > |
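Abbas's float4 explanation from this thread, condensed into a runnable sketch (plain PostgreSQL exhibits the same comparison behavior; only the node targeting is XC-specific):

CREATE TABLE float1 (a float4, b float);
INSERT INTO float1 VALUES (2.0/3, 2);
SELECT * FROM float1 WHERE a = 2.0/3;                  -- 0 rows: the literal is
                                                       -- float8 and never equals
                                                       -- the rounded float4 in a
SELECT * FROM float1 WHERE a = CAST(2.0/3 AS float4);  -- 1 row

Under hash distribution the same representational mismatch decides which datanode the coordinator targets, which is the trouble Mason is warning about.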
From: Koichi S. <ko...@in...> - 2011-05-25 01:11:32
|
Hi, The current code utilizes the existing hash-generation mechanism, and I think this is basically the right thing to do. By using it, we can pick almost any column (I'm not sure about geometric types and composite types; I would like to test) for hash distribution. The points are: 1) Is a distribution column stable enough? --- This is the user's choice, and most float attributes are not stable. 2) Can we reproduce the same hash value from the same input value? Mason's point is 2). It would be better to handle this from a more general view. Anyway, I think the current implementation is simple and general enough. We need a separate means to determine whether a specified column is a good choice as a distribution column. This should apply not only to built-in types but also to user-defined types, and it needs some design and implementation effort. At present, we can notify users that it is not recommended and may be prohibited in the future. We can introduce a new catalog table or extend pg_type to describe which types are allowed as a distribution key. --- Koichi # Geometric types' element values are floats, so they are not adequate to use as a distribution key. On Tue, 24 May 2011 09:03:29 -0400 Mason <ma...@us...> wrote: > On Tue, May 24, 2011 at 8:08 AM, Abbas Butt > <ga...@us...> wrote: > > Project "Postgres-XC". > > > > The branch, master has been updated > > via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) > > from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) > > > > > > - Log ----------------------------------------------------------------- > > commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae > > Author: Abbas <abb...@en...> > > Date: Tue May 24 17:06:30 2011 +0500 > > > > This patch adds support for the following data types to be used as distribution key > > > > INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR > > CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR > > FLOAT4, FLOAT8, NUMERIC, CASH > > ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, TIMETZ > > > > I am not sure some of these data types are a good idea to use for > distributing on. Float is inexact and seems problematic > > I just did a quick test: > > mds=# create table float1 (a float, b float) distribute by hash (a); > CREATE TABLE > > mds=# insert into float1 values (2.0/3, 2); > INSERT 0 1 > > mds=# select * from float1; > a | b > -------------------+--- > 0.666666666666667 | 2 > (1 row) > > Then, I copy and paste the output of a: > > mds=# select * from float1 where a = 0.666666666666667; > a | b > ---+--- > (0 rows) > > Looking at the plan it tries to take advantage of partitioning: > > mds=# explain select * from float1 where a = 0.666666666666667; > QUERY PLAN > ------------------------------------------------------------------- > Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) > (1 row) > > I think we should remove support for floats as a possible distribution > type; users may get themselves into trouble. > > > There may be similar issues with the timestamp data types: > > mds=# create table timestamp1 (a timestamp, b int) distribute by hash(a); > CREATE TABLE > mds=# insert into timestamp1 values (now(), 1); > INSERT 0 1 > mds=# select * from timestamp1; > a | b > ----------------------------+--- > 2011-05-24 08:51:21.597551 | 1 > (1 row) > > mds=# select * from timestamp1 where a = '2011-05-24 08:51:21.597551'; > a | b > ---+--- > (0 rows) > > > As far as BOOL goes, I suppose it may be ok, but of course there are > only two possible values.
I would block it, or at the very least if > the user leaves off the distribution clause, I would not consider BOOL > columns and look at other columns as better partitioning candidates. > > In any event, I am very glad to see the various INT types, CHAR, > VARCHAR, TEXT, NUMERIC and DATE supported. I am not so sure how useful > some of the others are. > > Thanks, > > Mason > > ------------------------------------------------------------------------------ > vRanger cuts backup time in half-while increasing security. > With the market-leading solution for virtual backup and recovery, > you get blazing-fast, flexible, and affordable data protection. > Download your free trial now. > http://p.sf.net/sfu/quest-d2dcopy1 > _______________________________________________ > Postgres-xc-developers mailing list > Pos...@li... > https://lists.sourceforge.net/lists/listinfo/postgres-xc-developers > |
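As one sketch of the catalog-driven check proposed here (the committed code instead hard-codes an OID list in IsHashDistributable()): stock PostgreSQL already records which types have a default hash operator class, so a first cut could be derived from pg_opclass and then filtered, rather than adding a new pg_type column right away:

-- types with a default hash opclass; a catalog-driven
-- IsHashDistributable() could start from this set and subtract
-- "unstable" types such as the floats (sketch, not the committed design)
SELECT oc.opcintype::regtype
FROM pg_opclass oc
JOIN pg_am am ON am.oid = oc.opcmethod
WHERE am.amname = 'hash'
  AND oc.opcdefault;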
From: Andrei M. <and...@gm...> - 2011-05-24 17:18:59
|
Hi Abbas, I looked at the code and see that for some data types compute_hash() returns not a hash code but the original value: + case INT8OID: + /* This gives added advantage that + * a = 8446744073709551359 + * and a = 8446744073709551359::int8 both work*/ + return DatumGetInt32(value); + case INT2OID: + return DatumGetInt16(value); + case OIDOID: + return DatumGetObjectId(value); + case INT4OID: + return DatumGetInt32(value); + case BOOLOID: + return DatumGetBool(value); That is not a critical error, and it gives slightly better calculation speed, but it may cause poor distributions when, for example, the distribution column contains only even or only odd values. Some nodes may then hold many rows while others hold none at all. I suggest using the hashintX functions here. And another point: OIDs are generated on the data nodes, so does it make sense to allow hashing here, where the value is supposed to come from the coordinator? 2011/5/24 Abbas Butt <ga...@us...> > Project "Postgres-XC". > > The branch, master has been updated > via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) > from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) > > > - Log ----------------------------------------------------------------- > commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae > Author: Abbas <abb...@en...> > Date: Tue May 24 17:06:30 2011 +0500 > > This patch adds support for the following data types to be used as > distribution key > > INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR > CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR > FLOAT4, FLOAT8, NUMERIC, CASH > ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, TIMETZ > > A new function compute_hash is added in the system which is used to > compute hash of a any of the supported data types. > The computed hash is used in the function GetRelationNodes to > find the targeted data node. > > EXPLAIN for RemoteQuery has been modified to show the number of > data nodes targeted for a certain query. This is essential > to spot bugs in the optimizer in case it is targeting all nodes > by mistake. > > In case of optimisations where comparison with a constant leads > the optimiser to point to a single data node, there were a couple > of mistakes in examine_conditions_walker. > First it was not supporting RelabelType, which represents a "dummy" > type coercion between two binary compatible datatypes. > This was resulting in the optimization not working for varchar > type for example. > Secondly it was not catering for the case where the user specifies the > condition such that the constant expression is written towards LHS and > the > variable towards the RHS of the = operator. > i.e. 23 = a > > A number of test cases have been added in regression to make sure > further enhancements do not break this functionality. > > This change has a sizeable impact on current regression tests in the > following manner. > > 1. horology test case crashes the server and has been commented out in > serial_schedule. > 2. In money test case the planner optimizer wrongly kicks in to optimize > this query > SELECT m = '$123.01' FROM money_data; > to point to a single data node. > 3. There were a few un-necessary EXPLAINs in create_index test case. > Since we have added support in EXPLAIN to show the number of > data nodes targeted for RemoteQuery, this test case was producing > output dependent on the cluster configuration. > 4.
In guc test case > DROP ROLE temp_reset_user; > results in > ERROR: permission denied to drop role > > diff --git a/src/backend/access/hash/hashfunc.c > b/src/backend/access/hash/hashfunc.c > index 577873b..22766c5 100644 > --- a/src/backend/access/hash/hashfunc.c > +++ b/src/backend/access/hash/hashfunc.c > @@ -28,6 +28,13 @@ > > #include "access/hash.h" > > +#ifdef PGXC > +#include "catalog/pg_type.h" > +#include "utils/builtins.h" > +#include "utils/timestamp.h" > +#include "utils/date.h" > +#include "utils/nabstime.h" > +#endif > > /* Note: this is used for both "char" and boolean datatypes */ > Datum > @@ -521,3 +528,91 @@ hash_uint32(uint32 k) > /* report the result */ > return UInt32GetDatum(c); > } > + > +#ifdef PGXC > +/* > + * compute_hash() -- Generaic hash function for all datatypes > + * > + */ > + > +Datum > +compute_hash(Oid type, Datum value, int *pErr) > +{ > + Assert(pErr); > + > + *pErr = 0; > + > + if (value == NULL) > + { > + *pErr = 1; > + return 0; > + } > + > + switch(type) > + { > + case INT8OID: > + /* This gives added advantage that > + * a = 8446744073709551359 > + * and a = 8446744073709551359::int8 both work*/ > + return DatumGetInt32(value); > + case INT2OID: > + return DatumGetInt16(value); > + case OIDOID: > + return DatumGetObjectId(value); > + case INT4OID: > + return DatumGetInt32(value); > + case BOOLOID: > + return DatumGetBool(value); > + > + case CHAROID: > + return DirectFunctionCall1(hashchar, value); > + case NAMEOID: > + return DirectFunctionCall1(hashname, value); > + case INT2VECTOROID: > + return DirectFunctionCall1(hashint2vector, value); > + > + case VARCHAROID: > + case TEXTOID: > + return DirectFunctionCall1(hashtext, value); > + > + case OIDVECTOROID: > + return DirectFunctionCall1(hashoidvector, value); > + case FLOAT4OID: > + return DirectFunctionCall1(hashfloat4, value); > + case FLOAT8OID: > + return DirectFunctionCall1(hashfloat8, value); > + > + case ABSTIMEOID: > + return DatumGetAbsoluteTime(value); > + case RELTIMEOID: > + return DatumGetRelativeTime(value); > + case CASHOID: > + return DirectFunctionCall1(hashint8, value); > + > + case BPCHAROID: > + return DirectFunctionCall1(hashbpchar, value); > + case BYTEAOID: > + return DirectFunctionCall1(hashvarlena, value); > + > + case DATEOID: > + return DatumGetDateADT(value); > + case TIMEOID: > + return DirectFunctionCall1(time_hash, value); > + case TIMESTAMPOID: > + return DirectFunctionCall1(timestamp_hash, value); > + case TIMESTAMPTZOID: > + return DirectFunctionCall1(timestamp_hash, value); > + case INTERVALOID: > + return DirectFunctionCall1(interval_hash, value); > + case TIMETZOID: > + return DirectFunctionCall1(timetz_hash, value); > + > + case NUMERICOID: > + return DirectFunctionCall1(hash_numeric, value); > + default: > + *pErr = 1; > + return 0; > + } > +} > + > +#endif > diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c > index 613d5ff..714190f 100644 > --- a/src/backend/commands/copy.c > +++ b/src/backend/commands/copy.c > @@ -1645,14 +1645,14 @@ CopyTo(CopyState cstate) > } > > #ifdef PGXC > - if (IS_PGXC_COORDINATOR && cstate->rel_loc) > + if (IS_PGXC_COORDINATOR && cstate->rel_loc) > { > cstate->processed = DataNodeCopyOut( > - GetRelationNodes(cstate->rel_loc, NULL, > RELATION_ACCESS_READ), > + GetRelationNodes(cstate->rel_loc, 0, > UNKNOWNOID, RELATION_ACCESS_READ), > cstate->connections, > cstate->copy_file); > } > - else > + else > { > #endif > > @@ -2417,15 +2417,18 @@ CopyFrom(CopyState cstate) > #ifdef PGXC > if (IS_PGXC_COORDINATOR && 
cstate->rel_loc) > { > - Datum *dist_col_value = NULL; > + Datum dist_col_value; > + Oid dist_col_type = UNKNOWNOID; > > if (cstate->idx_dist_by_col >= 0 && > !nulls[cstate->idx_dist_by_col]) > - dist_col_value = > &values[cstate->idx_dist_by_col]; > + { > + dist_col_value = > values[cstate->idx_dist_by_col]; > + dist_col_type = > attr[cstate->idx_dist_by_col]->atttypid; > + } > > if (DataNodeCopyIn(cstate->line_buf.data, > cstate->line_buf.len, > - > GetRelationNodes(cstate->rel_loc, (long *)dist_col_value, > - > RELATION_ACCESS_INSERT), > + > GetRelationNodes(cstate->rel_loc, dist_col_value, dist_col_type, > RELATION_ACCESS_INSERT), > cstate->connections)) > ereport(ERROR, > > (errcode(ERRCODE_CONNECTION_EXCEPTION), > @@ -4037,7 +4040,8 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot > *slot) > HeapTuple tuple; > Datum *values; > bool *nulls; > - Datum *dist_col_value = NULL; > + Datum dist_col_value; > + Oid dist_col_type; > MemoryContext oldcontext; > CopyState cstate; > > @@ -4082,6 +4086,11 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot > *slot) > cstate->fe_msgbuf = makeStringInfo(); > attr = cstate->tupDesc->attrs; > > + if (cstate->idx_dist_by_col >= 0) > + dist_col_type = > attr[cstate->idx_dist_by_col]->atttypid; > + else > + dist_col_type = UNKNOWNOID; > + > /* Get info about the columns we need to process. */ > cstate->out_functions = (FmgrInfo *) > palloc(cstate->tupDesc->natts * sizeof(FmgrInfo)); > foreach(lc, cstate->attnumlist) > @@ -4152,12 +4161,14 @@ DoInsertSelectCopy(EState *estate, TupleTableSlot > *slot) > > /* Get dist column, if any */ > if (cstate->idx_dist_by_col >= 0 && !nulls[cstate->idx_dist_by_col]) > - dist_col_value = &values[cstate->idx_dist_by_col]; > + dist_col_value = values[cstate->idx_dist_by_col]; > + else > + dist_col_type = UNKNOWNOID; > > /* Send item to the appropriate data node(s) (buffer) */ > if (DataNodeCopyIn(cstate->fe_msgbuf->data, > cstate->fe_msgbuf->len, > - GetRelationNodes(cstate->rel_loc, (long > *)dist_col_value, RELATION_ACCESS_INSERT), > + GetRelationNodes(cstate->rel_loc, > dist_col_value, dist_col_type, RELATION_ACCESS_INSERT), > cstate->connections)) > ereport(ERROR, > (errcode(ERRCODE_CONNECTION_EXCEPTION), > diff --git a/src/backend/commands/explain.c > b/src/backend/commands/explain.c > index a361186..fe74569 100644 > --- a/src/backend/commands/explain.c > +++ b/src/backend/commands/explain.c > @@ -851,8 +851,28 @@ ExplainNode(Plan *plan, PlanState *planstate, > case T_WorkTableScan: > #ifdef PGXC > case T_RemoteQuery: > + { > + RemoteQuery *remote_query = (RemoteQuery *) > plan; > + int pnc, nc; > + > + pnc = 0; > + nc = 0; > + if (remote_query->exec_nodes != NULL) > + { > + if > (remote_query->exec_nodes->primarynodelist != NULL) > + { > + pnc = > list_length(remote_query->exec_nodes->primarynodelist); > + appendStringInfo(es->str, " > (Primary Node Count [%d])", pnc); > + } > + if > (remote_query->exec_nodes->nodelist) > + { > + nc = > list_length(remote_query->exec_nodes->nodelist); > + appendStringInfo(es->str, " > (Node Count [%d])", nc); > + } > + } > #endif > - ExplainScanTarget((Scan *) plan, es); > + ExplainScanTarget((Scan *) plan, es); > + } > break; > case T_BitmapIndexScan: > { > diff --git a/src/backend/optimizer/plan/createplan.c > b/src/backend/optimizer/plan/createplan.c > index b6252a3..c03938d 100644 > --- a/src/backend/optimizer/plan/createplan.c > +++ b/src/backend/optimizer/plan/createplan.c > @@ -2418,9 +2418,7 @@ create_remotequery_plan(PlannerInfo *root, Path > *best_path, > 
scan_plan->exec_nodes->baselocatortype = > rel_loc_info->locatorType; > else > scan_plan->exec_nodes->baselocatortype = '\0'; > - scan_plan->exec_nodes = GetRelationNodes(rel_loc_info, > - > NULL, > - > RELATION_ACCESS_READ); > + scan_plan->exec_nodes = GetRelationNodes(rel_loc_info, 0, > UNKNOWNOID, RELATION_ACCESS_READ); > copy_path_costsize(&scan_plan->scan.plan, best_path); > > /* PGXCTODO - get better estimates */ > @@ -5024,8 +5022,7 @@ create_remotedelete_plan(PlannerInfo *root, Plan > *topplan) > fstep->sql_statement = pstrdup(buf->data); > fstep->combine_type = COMBINE_TYPE_SAME; > fstep->read_only = false; > - fstep->exec_nodes = GetRelationNodes(rel_loc_info, NULL, > - > RELATION_ACCESS_UPDATE); > + fstep->exec_nodes = GetRelationNodes(rel_loc_info, 0, > UNKNOWNOID, RELATION_ACCESS_UPDATE); > } > else > { > diff --git a/src/backend/pgxc/locator/locator.c > b/src/backend/pgxc/locator/locator.c > index 0ab157d..33fe8ac 100644 > --- a/src/backend/pgxc/locator/locator.c > +++ b/src/backend/pgxc/locator/locator.c > @@ -41,7 +41,7 @@ > > #include "catalog/pgxc_class.h" > #include "catalog/namespace.h" > - > +#include "access/hash.h" > > /* > * PGXCTODO For prototype, relations use the same hash mapping table. > @@ -206,7 +206,32 @@ char *pColName; > bool > IsHashDistributable(Oid col_type) > { > - if (col_type == INT4OID || col_type == INT2OID) > + if(col_type == INT8OID > + || col_type == INT2OID > + || col_type == OIDOID > + || col_type == INT4OID > + || col_type == BOOLOID > + || col_type == CHAROID > + || col_type == NAMEOID > + || col_type == INT2VECTOROID > + || col_type == TEXTOID > + || col_type == OIDVECTOROID > + || col_type == FLOAT4OID > + || col_type == FLOAT8OID > + || col_type == ABSTIMEOID > + || col_type == RELTIMEOID > + || col_type == CASHOID > + || col_type == BPCHAROID > + || col_type == BYTEAOID > + || col_type == VARCHAROID > + || col_type == DATEOID > + || col_type == TIMEOID > + || col_type == TIMESTAMPOID > + || col_type == TIMESTAMPTZOID > + || col_type == INTERVALOID > + || col_type == TIMETZOID > + || col_type == NUMERICOID > + ) > return true; > > return false; > @@ -296,7 +321,32 @@ RelationLocInfo *rel_loc_info; > bool > IsModuloDistributable(Oid col_type) > { > - if (col_type == INT4OID || col_type == INT2OID) > + if(col_type == INT8OID > + || col_type == INT2OID > + || col_type == OIDOID > + || col_type == INT4OID > + || col_type == BOOLOID > + || col_type == CHAROID > + || col_type == NAMEOID > + || col_type == INT2VECTOROID > + || col_type == TEXTOID > + || col_type == OIDVECTOROID > + || col_type == FLOAT4OID > + || col_type == FLOAT8OID > + || col_type == ABSTIMEOID > + || col_type == RELTIMEOID > + || col_type == CASHOID > + || col_type == BPCHAROID > + || col_type == BYTEAOID > + || col_type == VARCHAROID > + || col_type == DATEOID > + || col_type == TIMEOID > + || col_type == TIMESTAMPOID > + || col_type == TIMESTAMPTZOID > + || col_type == INTERVALOID > + || col_type == TIMETZOID > + || col_type == NUMERICOID > + ) > return true; > > return false; > @@ -409,13 +459,13 @@ GetRoundRobinNode(Oid relid) > * The returned List is a copy, so it should be freed when finished. 
> */ > ExecNodes * > -GetRelationNodes(RelationLocInfo *rel_loc_info, long *partValue, > - RelationAccessType accessType) > +GetRelationNodes(RelationLocInfo *rel_loc_info, Datum valueForDistCol, Oid > typeOfValueForDistCol, RelationAccessType accessType) > { > ListCell *prefItem; > ListCell *stepItem; > ExecNodes *exec_nodes; > - > + long hashValue; > + int nError; > > if (rel_loc_info == NULL) > return NULL; > @@ -480,10 +530,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, long > *partValue, > break; > > case LOCATOR_TYPE_HASH: > - > - if (partValue != NULL) > + hashValue = compute_hash(typeOfValueForDistCol, > valueForDistCol, &nError); > + if (nError == 0) > /* in prototype, all partitioned tables use > same map */ > - exec_nodes->nodelist = lappend_int(NULL, > get_node_from_hash(hash_range_int(*partValue))); > + exec_nodes->nodelist = lappend_int(NULL, > get_node_from_hash(hash_range_int(hashValue))); > else > if (accessType == RELATION_ACCESS_INSERT) > /* Insert NULL to node 1 */ > @@ -494,9 +544,10 @@ GetRelationNodes(RelationLocInfo *rel_loc_info, long > *partValue, > break; > > case LOCATOR_TYPE_MODULO: > - if (partValue != NULL) > + hashValue = compute_hash(typeOfValueForDistCol, > valueForDistCol, &nError); > + if (nError == 0) > /* in prototype, all partitioned tables use > same map */ > - exec_nodes->nodelist = lappend_int(NULL, > get_node_from_modulo(compute_modulo(*partValue))); > + exec_nodes->nodelist = lappend_int(NULL, > get_node_from_modulo(compute_modulo(hashValue))); > else > if (accessType == RELATION_ACCESS_INSERT) > /* Insert NULL to node 1 */ > @@ -750,7 +801,6 @@ RelationLocInfo * > GetRelationLocInfo(Oid relid) > { > RelationLocInfo *ret_loc_info = NULL; > - char *namespace; > > Relation rel = relation_open(relid, AccessShareLock); > > diff --git a/src/backend/pgxc/plan/planner.c > b/src/backend/pgxc/plan/planner.c > index 2448a74..4873f19 100644 > --- a/src/backend/pgxc/plan/planner.c > +++ b/src/backend/pgxc/plan/planner.c > @@ -43,20 +43,23 @@ > #include "utils/lsyscache.h" > #include "utils/portal.h" > #include "utils/syscache.h" > - > +#include "utils/numeric.h" > +#include "access/hash.h" > +#include "utils/timestamp.h" > +#include "utils/date.h" > > /* > * Convenient format for literal comparisons > * > - * PGXCTODO - make constant type Datum, handle other types > */ > typedef struct > { > - Oid relid; > - RelationLocInfo *rel_loc_info; > - Oid attrnum; > - char *col_name; > - long constant; /* assume long PGXCTODO - > should be Datum */ > + Oid relid; > + RelationLocInfo *rel_loc_info; > + Oid attrnum; > + char *col_name; > + Datum constValue; > + Oid constType; > } Literal_Comparison; > > /* > @@ -471,15 +474,12 @@ get_base_var(Var *var, XCWalkerContext *context) > static void > get_plan_nodes_insert(PlannerInfo *root, RemoteQuery *step) > { > - Query *query = root->parse; > - RangeTblEntry *rte; > - RelationLocInfo *rel_loc_info; > - Const *constant; > - ListCell *lc; > - long part_value; > - long *part_value_ptr = NULL; > - Expr *eval_expr = NULL; > - > + Query *query = root->parse; > + RangeTblEntry *rte; > + RelationLocInfo *rel_loc_info; > + Const *constant; > + ListCell *lc; > + Expr *eval_expr = NULL; > > step->exec_nodes = NULL; > > @@ -568,7 +568,7 @@ get_plan_nodes_insert(PlannerInfo *root, RemoteQuery > *step) > if (!lc) > { > /* Skip rest, handle NULL */ > - step->exec_nodes = GetRelationNodes(rel_loc_info, > NULL, RELATION_ACCESS_INSERT); > + step->exec_nodes = GetRelationNodes(rel_loc_info, > 0, UNKNOWNOID, RELATION_ACCESS_INSERT); > 
return; > } > > @@ -650,21 +650,11 @@ get_plan_nodes_insert(PlannerInfo *root, RemoteQuery > *step) > } > > constant = (Const *) checkexpr; > - > - if (constant->consttype == INT4OID || > - constant->consttype == INT2OID || > - constant->consttype == INT8OID) > - { > - part_value = (long) constant->constvalue; > - part_value_ptr = &part_value; > - } > - /* PGXCTODO - handle other data types */ > } > } > > /* single call handles both replicated and partitioned types */ > - step->exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr, > - > RELATION_ACCESS_INSERT); > + step->exec_nodes = GetRelationNodes(rel_loc_info, > constant->constvalue, constant->consttype, RELATION_ACCESS_INSERT); > > if (eval_expr) > pfree(eval_expr); > @@ -1047,6 +1037,28 @@ examine_conditions_walker(Node *expr_node, > XCWalkerContext *context) > { > Expr *arg1 = linitial(opexpr->args); > Expr *arg2 = lsecond(opexpr->args); > + RelabelType *rt; > + Expr *targ; > + > + if (IsA(arg1, RelabelType)) > + { > + rt = arg1; > + arg1 = rt->arg; > + } > + > + if (IsA(arg2, RelabelType)) > + { > + rt = arg2; > + arg2 = rt->arg; > + } > + > + /* Handle constant = var */ > + if (IsA(arg2, Var)) > + { > + targ = arg1; > + arg1 = arg2; > + arg2 = targ; > + } > > /* Look for a table */ > if (IsA(arg1, Var)) > @@ -1134,7 +1146,8 @@ examine_conditions_walker(Node *expr_node, > XCWalkerContext *context) > lit_comp->relid = > column_base->relid; > lit_comp->rel_loc_info = > rel_loc_info1; > lit_comp->col_name = > column_base->colname; > - lit_comp->constant = > constant->constvalue; > + lit_comp->constValue = > constant->constvalue; > + lit_comp->constType = > constant->consttype; > > > context->conditions->partitioned_literal_comps = lappend( > > context->conditions->partitioned_literal_comps, > @@ -1742,9 +1755,7 @@ get_plan_nodes_walker(Node *query_node, > XCWalkerContext *context) > if (rel_loc_info->locatorType != LOCATOR_TYPE_HASH && > rel_loc_info->locatorType != LOCATOR_TYPE_MODULO) > /* do not need to determine partitioning expression > */ > - context->query_step->exec_nodes = > GetRelationNodes(rel_loc_info, > - > NULL, > - > context->accessType); > + context->query_step->exec_nodes = > GetRelationNodes(rel_loc_info, 0, UNKNOWNOID, context->accessType); > > /* Note replicated table usage for determining safe queries > */ > if (context->query_step->exec_nodes) > @@ -1800,9 +1811,7 @@ get_plan_nodes_walker(Node *query_node, > XCWalkerContext *context) > { > Literal_Comparison *lit_comp = (Literal_Comparison > *) lfirst(lc); > > - test_exec_nodes = GetRelationNodes( > - lit_comp->rel_loc_info, > &(lit_comp->constant), > - RELATION_ACCESS_READ); > + test_exec_nodes = > GetRelationNodes(lit_comp->rel_loc_info, lit_comp->constValue, > lit_comp->constType, RELATION_ACCESS_READ); > > test_exec_nodes->tableusagetype = table_usage_type; > if (context->query_step->exec_nodes == NULL) > @@ -1828,9 +1837,7 @@ get_plan_nodes_walker(Node *query_node, > XCWalkerContext *context) > parent_child = (Parent_Child_Join *) > > linitial(context->conditions->partitioned_parent_child); > > - context->query_step->exec_nodes = > GetRelationNodes(parent_child->rel_loc_info1, > - > NULL, > - > context->accessType); > + context->query_step->exec_nodes = > GetRelationNodes(parent_child->rel_loc_info1, 0, UNKNOWNOID, > context->accessType); > context->query_step->exec_nodes->tableusagetype = > table_usage_type; > } > > @@ -3378,8 +3385,6 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, > ExecNodes **exec_nodes, const Ex > Expr *checkexpr; > Expr 
*eval_expr = NULL; > Const *constant; > - long part_value; > - long *part_value_ptr = NULL; > > eval_expr = (Expr *) eval_const_expressions(NULL, (Node *)expr); > checkexpr = get_numeric_constant(eval_expr); > @@ -3389,17 +3394,8 @@ GetHashExecNodes(RelationLocInfo *rel_loc_info, > ExecNodes **exec_nodes, const Ex > > constant = (Const *) checkexpr; > > - if (constant->consttype == INT4OID || > - constant->consttype == INT2OID || > - constant->consttype == INT8OID) > - { > - part_value = (long) constant->constvalue; > - part_value_ptr = &part_value; > - } > - > /* single call handles both replicated and partitioned types */ > - *exec_nodes = GetRelationNodes(rel_loc_info, part_value_ptr, > - > RELATION_ACCESS_INSERT); > + *exec_nodes = GetRelationNodes(rel_loc_info, constant->constvalue, > constant->consttype, RELATION_ACCESS_INSERT); > if (eval_expr) > pfree(eval_expr); > > diff --git a/src/backend/pgxc/pool/execRemote.c > b/src/backend/pgxc/pool/execRemote.c > index 75aca21..76e3eef 100644 > --- a/src/backend/pgxc/pool/execRemote.c > +++ b/src/backend/pgxc/pool/execRemote.c > @@ -1061,7 +1061,8 @@ BufferConnection(PGXCNodeHandle *conn) > RemoteQueryState *combiner = conn->combiner; > MemoryContext oldcontext; > > - Assert(conn->state == DN_CONNECTION_STATE_QUERY && combiner); > + if (combiner == NULL || conn->state != DN_CONNECTION_STATE_QUERY) > + return; > > /* > * When BufferConnection is invoked CurrentContext is related to > other > @@ -3076,9 +3077,8 @@ get_exec_connections(RemoteQueryState *planstate, > if (!isnull) > { > RelationLocInfo *rel_loc_info = > GetRelationLocInfo(exec_nodes->relid); > - ExecNodes *nodes = > GetRelationNodes(rel_loc_info, > - > (long *) &partvalue, > - > exec_nodes->accesstype); > + /* PGXCTODO what is the type of > partvalue here*/ > + ExecNodes *nodes = > GetRelationNodes(rel_loc_info, partvalue, UNKNOWNOID, > exec_nodes->accesstype); > if (nodes) > { > nodelist = nodes->nodelist; > diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c > index 415fc47..6d7939b 100644 > --- a/src/backend/tcop/postgres.c > +++ b/src/backend/tcop/postgres.c > @@ -670,18 +670,18 @@ pg_analyze_and_rewrite(Node *parsetree, const char > *query_string, > querytree_list = pg_rewrite_query(query); > > #ifdef PGXC > - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) > - { > - ListCell *lc; > - > - foreach(lc, querytree_list) > - { > - Query *query = (Query *) lfirst(lc); > - > - if (query->sql_statement == NULL) > - query->sql_statement = pstrdup(query_string); > - } > - } > + if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) > + { > + ListCell *lc; > + > + foreach(lc, querytree_list) > + { > + Query *query = (Query *) lfirst(lc); > + > + if (query->sql_statement == NULL) > + query->sql_statement = > pstrdup(query_string); > + } > + } > #endif > > TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string); > @@ -1043,7 +1043,7 @@ exec_simple_query(const char *query_string) > > querytree_list = pg_analyze_and_rewrite(parsetree, > query_string, > > NULL, 0); > - > + > plantree_list = pg_plan_queries(querytree_list, 0, NULL); > > /* Done with the snapshot used for parsing/planning */ > diff --git a/src/include/access/hash.h b/src/include/access/hash.h > index d5899f4..4aaffaa 100644 > --- a/src/include/access/hash.h > +++ b/src/include/access/hash.h > @@ -353,4 +353,8 @@ extern OffsetNumber _hash_binsearch_last(Page page, > uint32 hash_value); > extern void hash_redo(XLogRecPtr lsn, XLogRecord *record); > extern void hash_desc(StringInfo buf, uint8 xl_info, char *rec); > > 
+#ifdef PGXC > +extern Datum compute_hash(Oid type, Datum value, int *pErr); > +#endif > + > #endif /* HASH_H */ > diff --git a/src/include/pgxc/locator.h b/src/include/pgxc/locator.h > index 9f669d9..9ee983c 100644 > --- a/src/include/pgxc/locator.h > +++ b/src/include/pgxc/locator.h > @@ -100,8 +100,7 @@ extern char ConvertToLocatorType(int disttype); > extern char *GetRelationHashColumn(RelationLocInfo *rel_loc_info); > extern RelationLocInfo *GetRelationLocInfo(Oid relid); > extern RelationLocInfo *CopyRelationLocInfo(RelationLocInfo *src_info); > -extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, long > *partValue, > - RelationAccessType accessType); > +extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, Datum > valueForDistCol, Oid typeOfValueForDistCol, RelationAccessType accessType); > extern bool IsHashColumn(RelationLocInfo *rel_loc_info, char > *part_col_name); > extern bool IsHashColumnForRelId(Oid relid, char *part_col_name); > extern int GetRoundRobinNode(Oid relid); > diff --git a/src/test/regress/expected/create_index_1.out > b/src/test/regress/expected/create_index_1.out > index 52fdc91..ab3807c 100644 > --- a/src/test/regress/expected/create_index_1.out > +++ b/src/test/regress/expected/create_index_1.out > @@ -174,15 +174,10 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, > -12)'; > SET enable_seqscan = OFF; > SET enable_indexscan = ON; > SET enable_bitmapscan = ON; > -EXPLAIN (COSTS OFF) > -SELECT * FROM fast_emp4000 > - WHERE home_base @ '(200,200),(2000,1000)'::box > - ORDER BY (home_base[0])[0]; > - QUERY PLAN > ----------------- > - Data Node Scan > -(1 row) > - > +--EXPLAIN (COSTS OFF) > +--SELECT * FROM fast_emp4000 > +-- WHERE home_base @ '(200,200),(2000,1000)'::box > +-- ORDER BY (home_base[0])[0]; > SELECT * FROM fast_emp4000 > WHERE home_base @ '(200,200),(2000,1000)'::box > ORDER BY (home_base[0])[0]; > @@ -190,40 +185,25 @@ SELECT * FROM fast_emp4000 > ----------- > (0 rows) > > -EXPLAIN (COSTS OFF) > -SELECT count(*) FROM fast_emp4000 WHERE home_base && > '(1000,1000,0,0)'::box; > - QUERY PLAN > ----------------- > - Data Node Scan > -(1 row) > - > +--EXPLAIN (COSTS OFF) > +--SELECT count(*) FROM fast_emp4000 WHERE home_base && > '(1000,1000,0,0)'::box; > SELECT count(*) FROM fast_emp4000 WHERE home_base && > '(1000,1000,0,0)'::box; > count > ------- > 1 > (1 row) > > -EXPLAIN (COSTS OFF) > -SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; > - QUERY PLAN > ----------------- > - Data Node Scan > -(1 row) > - > +--EXPLAIN (COSTS OFF) > +--SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; > SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; > count > ------- > 138 > (1 row) > > -EXPLAIN (COSTS OFF) > -SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon > - ORDER BY (poly_center(f1))[0]; > - QUERY PLAN > ----------------- > - Data Node Scan > -(1 row) > - > +--EXPLAIN (COSTS OFF) > +--SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon > +-- ORDER BY (poly_center(f1))[0]; > SELECT * FROM polygon_tbl WHERE f1 ~ '((1,1),(2,2),(2,1))'::polygon > ORDER BY (poly_center(f1))[0]; > id | f1 > @@ -231,14 +211,9 @@ SELECT * FROM polygon_tbl WHERE f1 ~ > '((1,1),(2,2),(2,1))'::polygon > 1 | ((2,0),(2,4),(0,0)) > (1 row) > > -EXPLAIN (COSTS OFF) > -SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) > - ORDER BY area(f1); > - QUERY PLAN > ----------------- > - Data Node Scan > -(1 row) > - > +--EXPLAIN (COSTS OFF) > +--SELECT * FROM circle_tbl WHERE f1 && 
circle(point(1,-2), 1) > +-- ORDER BY area(f1); > SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) > ORDER BY area(f1); > f1 > @@ -269,9 +244,9 @@ LINE 1: SELECT count(*) FROM gcircle_tbl WHERE f1 && > '<(500,500),500... > ^ > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; > @@ -282,9 +257,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ box > '(0,0,100,100)'; > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; > @@ -295,9 +270,9 @@ SELECT count(*) FROM point_tbl WHERE box > '(0,0,100,100)' @> f1; > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl WHERE f1 <@ polygon > '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl WHERE f1 <@ polygon > '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; > @@ -308,9 +283,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ polygon > '(0,0),(0,100),(100,100),(50, > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; > @@ -321,9 +296,9 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ circle > '<(50,50),50>'; > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; > @@ -334,9 +309,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, > 0.0)'; > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; > @@ -347,9 +322,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, > 0.0)'; > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; > @@ -360,9 +335,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, > 0.0)'; > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; > @@ -373,9 +348,9 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, > 0.0)'; > > EXPLAIN (COSTS OFF) > SELECT count(*) FROM point_tbl p WHERE p.f1 ~= 
'(-5, -12)'; > - QUERY PLAN > ----------------- > - Data Node Scan > + QUERY PLAN > +--------------------------------- > + Data Node Scan (Node Count [1]) > (1 row) > > SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; > @@ -774,7 +749,7 @@ CREATE INDEX hash_f8_index ON hash_f8_heap USING hash > (random float8_ops); > -- > CREATE TABLE func_index_heap (f1 text, f2 text); > CREATE UNIQUE INDEX func_index_index on func_index_heap (textcat(f1,f2)); > -ERROR: Cannot locally enforce a unique index on round robin distributed > table. > +ERROR: Unique index of partitioned table must contain the hash/modulo > distribution column. > INSERT INTO func_index_heap VALUES('ABC','DEF'); > INSERT INTO func_index_heap VALUES('AB','CDEFG'); > INSERT INTO func_index_heap VALUES('QWE','RTY'); > @@ -788,7 +763,7 @@ INSERT INTO func_index_heap VALUES('QWERTY'); > DROP TABLE func_index_heap; > CREATE TABLE func_index_heap (f1 text, f2 text); > CREATE UNIQUE INDEX func_index_index on func_index_heap ((f1 || f2) > text_ops); > -ERROR: Cannot locally enforce a unique index on round robin distributed > table. > +ERROR: Unique index of partitioned table must contain the hash/modulo > distribution column. > INSERT INTO func_index_heap VALUES('ABC','DEF'); > INSERT INTO func_index_heap VALUES('AB','CDEFG'); > INSERT INTO func_index_heap VALUES('QWE','RTY'); > diff --git a/src/test/regress/expected/float4_1.out > b/src/test/regress/expected/float4_1.out > index 432d159..f50147d 100644 > --- a/src/test/regress/expected/float4_1.out > +++ b/src/test/regress/expected/float4_1.out > @@ -125,16 +125,6 @@ SELECT 'nan'::numeric::float4; > NaN > (1 row) > > -SELECT '' AS five, * FROM FLOAT4_TBL; > - five | f1 > -------+------------- > - | 1004.3 > - | 1.23457e+20 > - | 0 > - | -34.84 > - | 1.23457e-20 > -(5 rows) > - > SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; > five | f1 > ------+------------- > @@ -257,13 +247,14 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM > FLOAT4_TBL f ORDER BY f1; > UPDATE FLOAT4_TBL > SET f1 = FLOAT4_TBL.f1 * '-1' > WHERE FLOAT4_TBL.f1 > '0.0'; > +ERROR: Partition column can't be updated in current version > SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; > - five | f1 > -------+-------------- > - | -1.23457e+20 > - | -1004.3 > - | -34.84 > - | -1.23457e-20 > - | 0 > + five | f1 > +------+------------- > + | -34.84 > + | 0 > + | 1.23457e-20 > + | 1004.3 > + | 1.23457e+20 > (5 rows) > > diff --git a/src/test/regress/expected/float8_1.out > b/src/test/regress/expected/float8_1.out > index 65fe187..8ce7930 100644 > --- a/src/test/regress/expected/float8_1.out > +++ b/src/test/regress/expected/float8_1.out > @@ -381,6 +381,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; > UPDATE FLOAT8_TBL > SET f1 = FLOAT8_TBL.f1 * '-1' > WHERE FLOAT8_TBL.f1 > '0.0'; > +ERROR: Partition column can't be updated in current version > SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1; > ERROR: value out of range: overflow > SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1; > @@ -396,17 +397,17 @@ ERROR: cannot take logarithm of zero > SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0'; > ERROR: cannot take logarithm of a negative number > SELECT '' AS bad, exp(f.f1) from FLOAT8_TBL f ORDER BY f1; > -ERROR: value out of range: underflow > +ERROR: value out of range: overflow > SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f; > ERROR: division by zero > SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; > - five | f1 > -------+----------------------- > - | 
-1.2345678901234e+200 > - | -1004.3 > - | -34.84 > - | -1.2345678901234e-200 > - | 0 > + five | f1 > +------+---------------------- > + | -34.84 > + | 0 > + | 1.2345678901234e-200 > + | 1004.3 > + | 1.2345678901234e+200 > (5 rows) > > -- test for over- and underflow > diff --git a/src/test/regress/expected/foreign_key_1.out > b/src/test/regress/expected/foreign_key_1.out > index 7eccdc6..3cb7d17 100644 > --- a/src/test/regress/expected/foreign_key_1.out > +++ b/src/test/regress/expected/foreign_key_1.out > @@ -773,9 +773,9 @@ INSERT INTO FKTABLE VALUES(43); -- should > fail > ERROR: insert or update on table "fktable" violates foreign key > constraint "fktable_ftest1_fkey" > DETAIL: Key (ftest1)=(43) is not present in table "pktable". > UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed > +ERROR: Partition column can't be updated in current version > UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail > -ERROR: insert or update on table "fktable" violates foreign key > constraint "fktable_ftest1_fkey" > -DETAIL: Key (ftest1)=(43) is not present in table "pktable". > +ERROR: Partition column can't be updated in current version > DROP TABLE FKTABLE; > -- This should fail, because we'd have to cast numeric to int which is > -- not an implicit coercion (or use numeric=numeric, but that's not part > @@ -787,34 +787,22 @@ DROP TABLE PKTABLE; > -- On the other hand, this should work because int implicitly promotes to > -- numeric, and we allow promotion on the FK side > CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY); > -ERROR: Column ptest1 is not a hash distributable data type > +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index > "pktable_pkey" for table "pktable" > INSERT INTO PKTABLE VALUES(42); > -ERROR: relation "pktable" does not exist > -LINE 1: INSERT INTO PKTABLE VALUES(42); > - ^ > CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable); > -ERROR: relation "pktable" does not exist > -- Check it actually works > INSERT INTO FKTABLE VALUES(42); -- should succeed > -ERROR: relation "fktable" does not exist > -LINE 1: INSERT INTO FKTABLE VALUES(42); > - ^ > +ERROR: insert or update on table "fktable" violates foreign key > constraint "fktable_ftest1_fkey" > +DETAIL: Key (ftest1)=(42) is not present in table "pktable". > INSERT INTO FKTABLE VALUES(43); -- should fail > -ERROR: relation "fktable" does not exist > -LINE 1: INSERT INTO FKTABLE VALUES(43); > - ^ > +ERROR: insert or update on table "fktable" violates foreign key > constraint "fktable_ftest1_fkey" > +DETAIL: Key (ftest1)=(43) is not present in table "pktable". 
> UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed > -ERROR: relation "fktable" does not exist > -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1; > - ^ > +ERROR: Partition column can't be updated in current version > UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail > -ERROR: relation "fktable" does not exist > -LINE 1: UPDATE FKTABLE SET ftest1 = ftest1 + 1; > - ^ > +ERROR: Partition column can't be updated in current version > DROP TABLE FKTABLE; > -ERROR: table "fktable" does not exist > DROP TABLE PKTABLE; > -ERROR: table "pktable" does not exist > -- Two columns, two tables > CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, > ptest2)); > NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index > "pktable_pkey" for table "pktable" > diff --git a/src/test/regress/expected/money_1.out > b/src/test/regress/expected/money_1.out > new file mode 100644 > index 0000000..6a15792 > --- /dev/null > +++ b/src/test/regress/expected/money_1.out > @@ -0,0 +1,186 @@ > +-- > +-- MONEY > +-- > +CREATE TABLE money_data (m money); > +INSERT INTO money_data VALUES ('123'); > +SELECT * FROM money_data; > + m > +--------- > + $123.00 > +(1 row) > + > +SELECT m + '123' FROM money_data; > + ?column? > +---------- > + $246.00 > +(1 row) > + > +SELECT m + '123.45' FROM money_data; > + ?column? > +---------- > + $246.45 > +(1 row) > + > +SELECT m - '123.45' FROM money_data; > + ?column? > +---------- > + -$0.45 > +(1 row) > + > +SELECT m * 2 FROM money_data; > + ?column? > +---------- > + $246.00 > +(1 row) > + > +SELECT m / 2 FROM money_data; > + ?column? > +---------- > + $61.50 > +(1 row) > + > +-- All true > +SELECT m = '$123.00' FROM money_data; > + ?column? > +---------- > + t > +(1 row) > + > +SELECT m != '$124.00' FROM money_data; > + ?column? > +---------- > + t > +(1 row) > + > +SELECT m <= '$123.00' FROM money_data; > + ?column? > +---------- > + t > +(1 row) > + > +SELECT m >= '$123.00' FROM money_data; > + ?column? > +---------- > + t > +(1 row) > + > +SELECT m < '$124.00' FROM money_data; > + ?column? > +---------- > + t > +(1 row) > + > +SELECT m > '$122.00' FROM money_data; > + ?column? > +---------- > + t > +(1 row) > + > +-- All false > +SELECT m = '$123.01' FROM money_data; > + ?column? > +---------- > +(0 rows) > + > +SELECT m != '$123.00' FROM money_data; > + ?column? > +---------- > + f > +(1 row) > + > +SELECT m <= '$122.99' FROM money_data; > + ?column? > +---------- > + f > +(1 row) > + > +SELECT m >= '$123.01' FROM money_data; > + ?column? > +---------- > + f > +(1 row) > + > +SELECT m > '$124.00' FROM money_data; > + ?column? > +---------- > + f > +(1 row) > + > +SELECT m < '$122.00' FROM money_data; > + ?column? 
> +---------- > + f > +(1 row) > + > +SELECT cashlarger(m, '$124.00') FROM money_data; > + cashlarger > +------------ > + $124.00 > +(1 row) > + > +SELECT cashsmaller(m, '$124.00') FROM money_data; > + cashsmaller > +------------- > + $123.00 > +(1 row) > + > +SELECT cash_words(m) FROM money_data; > + cash_words > +------------------------------------------------- > + One hundred twenty three dollars and zero cents > +(1 row) > + > +SELECT cash_words(m + '1.23') FROM money_data; > + cash_words > +-------------------------------------------------------- > + One hundred twenty four dollars and twenty three cents > +(1 row) > + > +DELETE FROM money_data; > +INSERT INTO money_data VALUES ('$123.45'); > +SELECT * FROM money_data; > + m > +--------- > + $123.45 > +(1 row) > + > +DELETE FROM money_data; > +INSERT INTO money_data VALUES ('$123.451'); > +SELECT * FROM money_data; > + m > +--------- > + $123.45 > +(1 row) > + > +DELETE FROM money_data; > +INSERT INTO money_data VALUES ('$123.454'); > +SELECT * FROM money_data; > + m > +--------- > + $123.45 > +(1 row) > + > +DELETE FROM money_data; > +INSERT INTO money_data VALUES ('$123.455'); > +SELECT * FROM money_data; > + m > +--------- > + $123.46 > +(1 row) > + > +DELETE FROM money_data; > +INSERT INTO money_data VALUES ('$123.456'); > +SELECT * FROM money_data; > + m > +--------- > + $123.46 > +(1 row) > + > +DELETE FROM money_data; > +INSERT INTO money_data VALUES ('$123.459'); > +SELECT * FROM money_data; > + m > +--------- > + $123.46 > +(1 row) > + > diff --git a/src/test/regress/expected/prepared_xacts_2.out > b/src/test/regress/expected/prepared_xacts_2.out > index e456200..307ffad 100644 > --- a/src/test/regress/expected/prepared_xacts_2.out > +++ b/src/test/regress/expected/prepared_xacts_2.out > @@ -6,7 +6,7 @@ > -- isn't really needed ... stopping and starting the postmaster would > -- be enough, but we can't even do that here. > -- create a simple table that we'll use in the tests > -CREATE TABLE pxtest1 (foobar VARCHAR(10)); > +CREATE TABLE pxtest1 (foobar VARCHAR(10)) distribute by replication; > INSERT INTO pxtest1 VALUES ('aaa'); > -- Test PREPARE TRANSACTION > BEGIN; > diff --git a/src/test/regress/expected/reltime_1.out > b/src/test/regress/expected/reltime_1.out > new file mode 100644 > index 0000000..83f61f9 > --- /dev/null > +++ b/src/test/regress/expected/reltime_1.out > @@ -0,0 +1,109 @@ > +-- > +-- RELTIME > +-- > +CREATE TABLE RELTIME_TBL (f1 reltime); > +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 1 minute'); > +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 5 hour'); > +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 10 day'); > +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 34 year'); > +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 3 months'); > +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 14 seconds ago'); > +-- badly formatted reltimes > +INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltime'); > +ERROR: invalid input syntax for type reltime: "badly formatted reltime" > +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltim... 
> + ^ > +INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); > +ERROR: invalid input syntax for type reltime: "@ 30 eons ago" > +LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); > + ^ > +-- test reltime operators > +SELECT '' AS six, * FROM RELTIME_TBL ORDER BY f1; > + six | f1 > +-----+--------------- > + | @ 14 secs ago > + | @ 1 min > + | @ 5 hours > + | @ 10 days > + | @ 3 mons > + | @ 34 years > +(6 rows) > + > +SELECT '' AS five, * FROM RELTIME_TBL > + WHERE RELTIME_TBL.f1 <> reltime '@ 10 days' ORDER BY f1; > + five | f1 > +------+--------------- > + | @ 14 secs ago > + | @ 1 min > + | @ 5 hours > + | @ 3 mons > + | @ 34 years > +(5 rows) > + > +SELECT '' AS three, * FROM RELTIME_TBL > + WHERE RELTIME_TBL.f1 <= reltime '@ 5 hours' ORDER BY f1; > + three | f1 > +-------+--------------- > + | @ 14 secs ago > + | @ 1 min > + | @ 5 hours > +(3 rows) > + > +SELECT '' AS three, * FROM RELTIME_TBL > + WHERE RELTIME_TBL.f1 < reltime '@ 1 day' ORDER BY f1; > + three | f1 > +-------+--------------- > + | @ 14 secs ago > + | @ 1 min > + | @ 5 hours > +(3 rows) > + > +SELECT '' AS one, * FROM RELTIME_TBL > + WHERE RELTIME_TBL.f1 = reltime '@ 34 years' ORDER BY f1; > + one | f1 > +-----+---------- > + | 34 years > +(1 row) > + > +SELECT '' AS two, * FROM RELTIME_TBL > + WHERE RELTIME_TBL.f1 >= reltime '@ 1 month' ORDER BY f1; > + two | f1 > +-----+------------ > + | @ 3 mons > + | @ 34 years > +(2 rows) > + > +SELECT '' AS five, * FROM RELTIME_TBL > + WHERE RELTIME_TBL.f1 > reltime '@ 3 seconds ago' ORDER BY f1; > + five | f1 > +------+------------ > + | @ 1 min > + | @ 5 hours > + | @ 10 days > + | @ 3 mons > + | @ 34 years > +(5 rows) > + > +SELECT '' AS fifteen, r1.*, r2.* > + FROM RELTIME_TBL r1, RELTIME_TBL r2 > + WHERE r1.f1 > r2.f1 > + ORDER BY r1.f1, r2.f1; > + fifteen | f1 | f1 > +---------+------------+--------------- > + | @ 1 min | @ 14 secs ago > + | @ 5 hours | @ 14 secs ago > + | @ 5 hours | @ 1 min > + | @ 10 days | @ 14 secs ago > + | @ 10 days | @ 1 min > + | @ 10 days | @ 5 hours > + | @ 3 mons | @ 14 secs ago > + | @ 3 mons | @ 1 min > + | @ 3 mons | @ 5 hours > + | @ 3 mons | @ 10 days > + | @ 34 years | @ 14 secs ago > + | @ 34 years | @ 1 min > + | @ 34 years | @ 5 hours > + | @ 34 years | @ 10 days > + | @ 34 years | @ 3 mons > +(15 rows) > + > diff --git a/src/test/regress/expected/triggers_1.out > b/src/test/regress/expected/triggers_1.out > index 5528c66..a9f83ec 100644 > --- a/src/test/regress/expected/triggers_1.out > +++ b/src/test/regress/expected/triggers_1.out > @@ -717,30 +717,30 @@ ERROR: Postgres-XC does not support TRIGGER yet > DETAIL: The feature is not currently supported > \set QUIET false > UPDATE min_updates_test SET f1 = f1; > -UPDATE 2 > -UPDATE min_updates_test SET f2 = f2 + 1; > ERROR: Partition column can't be updated in current version > +UPDATE min_updates_test SET f2 = f2 + 1; > +UPDATE 2 > UPDATE min_updates_test SET f3 = 2 WHERE f3 is null; > UPDATE 1 > UPDATE min_updates_test_oids SET f1 = f1; > -UPDATE 2 > -UPDATE min_updates_test_oids SET f2 = f2 + 1; > ERROR: Partition column can't be updated in current version > +UPDATE min_updates_test_oids SET f2 = f2 + 1; > +UPDATE 2 > UPDATE min_updates_test_oids SET f3 = 2 WHERE f3 is null; > UPDATE 1 > \set QUIET true > SELECT * FROM min_updates_test ORDER BY 1,2,3; > f1 | f2 | f3 > ----+----+---- > - a | 1 | 2 > - b | 2 | 2 > + a | 2 | 2 > + b | 3 | 2 > (2 rows) > > SELECT * FROM min_updates_test_oids ORDER BY 1,2,3; > f1 | f2 | f3 > ----+----+---- > - a | 1 | 2 > - b | 2 | 2 > + a 
| 2 | 2 > + b | 3 | 2 > (2 rows) > > DROP TABLE min_updates_test; > diff --git a/src/test/regress/expected/tsearch_1.out > b/src/test/regress/expected/tsearch_1.out > index e8c35d4..4d1f1b1 100644 > --- a/src/test/regress/expected/tsearch_1.out > +++ b/src/test/regress/expected/tsearch_1.out > @@ -801,7 +801,7 @@ SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new > & york'; > (1 row) > > CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword); > -ERROR: Cannot locally enforce a unique index on round robin distributed > table. > +ERROR: Unique index of partitioned table must contain the hash/modulo > distribution column. > SET enable_seqscan=OFF; > SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new & york'; > count > @@ -1054,6 +1054,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ > to_tsquery('345&qwerty'); > (0 rows) > > UPDATE test_tsvector SET t = null WHERE t = '345 qwerty'; > +ERROR: Partition column can't be updated in current version > SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); > count > ------- > diff --git a/src/test/regress/expected/xc_distkey.out > b/src/test/regress/expected/xc_distkey.out > new file mode 100644 > index 0000000..d050b27 > --- /dev/null > +++ b/src/test/regress/expected/xc_distkey.out > @@ -0,0 +1,618 @@ > +-- XC Test cases to verify that all supported data types are working as > distribution key > +-- Also verifies that the comaparison with a constant for equality is > optimized. > +create table ch_tab(a char) distribute by modulo(a); > +insert into ch_tab values('a'); > +select hashchar('a'); > + hashchar > +----------- > + 463612535 > +(1 row) > + > +create table nm_tab(a name) distribute by modulo(a); > +insert into nm_tab values('abbas'); > +select hashname('abbas'); > + hashname > +----------- > + 605752656 > +(1 row) > + > +create table nu_tab(a numeric(10,5)) distribute by modulo(a); > +insert into nu_tab values(123.456); > +insert into nu_tab values(789.412); > +select * from nu_tab order by a; > + a > +----------- > + 123.45600 > + 789.41200 > +(2 rows) > + > +select * from nu_tab where a = 123.456; > + a > +----------- > + 123.45600 > +(1 row) > + > +select * from nu_tab where 789.412 = a; > + a > +----------- > + 789.41200 > +(1 row) > + > +explain select * from nu_tab where a = 123.456; > + QUERY PLAN > +------------------------------------------------------------------- > + Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) > +(1 row) > + > +explain select * from nu_tab where 789.412 = a; > + QUERY PLAN > +------------------------------------------------------------------- > + Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) > +(1 row) > + > +create table tx_tab(a text) distribute by modulo(a); > +insert into tx_tab values('hello world'); > +insert into tx_tab values('Did the quick brown fox jump over the lazy > dog?'); > +select * from tx_tab order by a; > + a > +------------------------------------------------- > + Did the quick brown fox jump over the lazy dog? > + hello world > +(2 rows) > + > +select * from tx_tab where a = 'hello world'; > + a > +------------- > + hello world > +(1 row) > + > +select * from tx_tab where a = 'Did the quick brown fox jump over the lazy > dog?'; > + a > +------------------------------------------------- > + Did the quick brown fox jump over the lazy dog? 
> +(1 row) > + > +select * from tx_tab where 'hello world' = a; > + a > +------------- > + hello world > +(1 row) > + > +select * from tx_tab where 'Did the quick brown fox jump over the lazy > dog?' = a; > + a > +------------------------------------------------- > + Did the quick brown fox jump over the lazy dog? > +(1 row) > + > +explain select * from tx_tab where a = 'hello world'; > + QUERY PLAN > +------------------------------------------------------------------- > + Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) > +(1 row) > + > +explain select * from tx_tab where a = 'Did the quick brown fox jump over > the lazy dog?'; > + QUERY PLAN > +------------------------------------------------------------------- > + Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) > +(1 row) > + > +create ta... [truncated message content] |
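An aside on the two unique-index failures quoted in the diff above (func_index_index and bt_tsq): a unique index can only be enforced locally on each datanode, so unless the indexed key contains the hash/modulo distribution column, equal keys may land on different nodes and each pass its local check. Below is a minimal C sketch of that failure mode, assuming a simplified two-node round-robin model; all names here (insert_unique, node_rows, etc.) are illustrative, not XC internals.

    #include <stdio.h>
    #include <string.h>

    #define NODES 2
    #define CAP   8

    /* Each node enforces uniqueness only over the rows it stores locally. */
    static const char *node_rows[NODES][CAP];
    static int         node_len[NODES];

    static int insert_unique(int node, const char *key)
    {
        for (int i = 0; i < node_len[node]; i++)
            if (strcmp(node_rows[node][i], key) == 0)
                return 0;                      /* local duplicate: rejected */
        node_rows[node][node_len[node]++] = key;
        return 1;                              /* accepted on this node */
    }

    int main(void)
    {
        /* Round-robin placement ignores the indexed value, so the same key
         * can be accepted twice, once per node -- a silent uniqueness hole. */
        printf("%d\n", insert_unique(0, "ABCDEF"));   /* 1: node 0 accepts */
        printf("%d\n", insert_unique(1, "ABCDEF"));   /* 1: node 1 accepts too */
        return 0;
    }

Distributing by hash or modulo on the indexed column closes the hole, since equal keys then always map to the same node -- which is what the new error message demands.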
From: Mason S. <mas...@gm...> - 2011-05-24 13:58:05
|
On Tue, May 24, 2011 at 9:40 AM, Abbas Butt <abb...@te...> wrote: > > > On Tue, May 24, 2011 at 6:03 PM, Mason <ma...@us...> > wrote: >> >> On Tue, May 24, 2011 at 8:08 AM, Abbas Butt >> <ga...@us...> wrote: >> > Project "Postgres-XC". >> > >> > The branch, master has been updated >> > via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) >> > from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) >> > >> > >> > - Log ----------------------------------------------------------------- >> > commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae >> > Author: Abbas <abb...@en...> >> > Date: Tue May 24 17:06:30 2011 +0500 >> > >> > This patch adds support for the following data types to be used as >> > distribution key >> > >> > INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR >> > CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR >> > FLOAT4, FLOAT8, NUMERIC, CASH >> > ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, >> > TIMETZ >> > >> >> I am not sure some of these data types are a good idea to use for >> distributing on. Float is inexact and seems problematic >> >> I just did a quick test: >> >> mds=# create table float1 (a float, b float) distribute by hash (a); >> CREATE TABLE >> >> mds=# insert into float1 values (2.0/3, 2); >> INSERT 0 1 >> >> mds=# select * from float1; >> a | b >> -------------------+--- >> 0.666666666666667 | 2 >> (1 row) >> >> Then, I copy and paste the output of a: >> >> mds=# select * from float1 where a = 0.666666666666667; >> a | b >> ---+--- >> (0 rows) >> > > float is a tricky type. Leave XC aside this test case will produce same > results in plain postgres for this reason. > The column actually does not contain 0.666666666666667, what psql is showing > us is only an approximation of what is stored there. > select * from float1 where a = 2.0/3; would however work. > 2ndly suppose we have the same test case with data type float4. > Now both > select * from float1 where a = 0.666666666666667; and > select * from float1 where a = 2.0/3; > would show up no results both in PG and XC. > The reason is that PG treats real numbers as float8 by default and float8 > does not compare to float4. > select * from float1 where a = cast (2.0/3 as float4); > would therefore work. > Any user willing to use float types has to be aware of these strange > behaviors and knowing these he/she may benefit from being able to use it as > a distribution key. I don't think it is a good idea that they have to know that they should change all of their application code and add casting to make sure it works like they want. I think people are just going to get themselves into trouble. I strongly recommend disabling distribution support for some of these data types. Thanks, Mason > >> >> Looking at the plan it tries to take advantage of partitioning: >> >> mds=# explain select * from float1 where a = 0.666666666666667; >> QUERY PLAN >> ------------------------------------------------------------------- >> Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) >> (1 row) >> >> I think we should remove support for floats as a possible distribution >> type; users may get themselves into trouble. 
>>
>> There may be similar issues with the timestamp data types:
>>
>> mds=# create table timestamp1 (a timestamp, b int) distribute by hash(a);
>> CREATE TABLE
>> mds=# insert into timestamp1 values (now(), 1);
>> INSERT 0 1
>> mds=# select * from timestamp1;
>>              a              | b
>> ----------------------------+---
>>  2011-05-24 08:51:21.597551 | 1
>> (1 row)
>>
>> mds=# select * from timestamp1 where a = '2011-05-24 08:51:21.597551';
>>  a | b
>> ---+---
>> (0 rows)
>>
>> As far as BOOL goes, I suppose it may be ok, but of course there are
>> only two possible values. I would block it, or at the very least if
>> the user leaves off the distribution clause, I would not consider BOOL
>> columns and look at other columns as better partitioning candidates.
>>
>> In any event, I am very glad to see the various INT types, CHAR,
>> VARCHAR, TEXT, NUMERIC and DATE supported. I am not so sure how useful
>> some of the others are.
>>
>> Thanks,
>>
>> Mason
|
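Mason's empty result is reproducible outside the database: psql prints a shortened 15-digit rendering of the stored float8, and parsing that rendering back yields a slightly different double. A minimal standalone C sketch of the effect (illustrative only, not XC code):

    #include <stdio.h>

    int main(void)
    {
        double stored = 2.0 / 3.0;           /* what the INSERT actually stored */
        double pasted = 0.666666666666667;   /* what psql displayed, pasted back */

        /* The 15-digit rendering is not the exact stored value, so the
         * equality test fails -- the same effect as the empty result above. */
        printf("equal: %d\n", stored == pasted);            /* prints: equal: 0 */
        printf("stored: %.17g\npasted: %.17g\n", stored, pasted);
        return 0;
    }

The timestamp failure may well have the same shape on builds that store timestamps as floating point, where the displayed microseconds are likewise a rounded rendering.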
From: Abbas B. <abb...@te...> - 2011-05-24 13:40:17
|
On Tue, May 24, 2011 at 6:03 PM, Mason <ma...@us...> wrote:
> On Tue, May 24, 2011 at 8:08 AM, Abbas Butt
> <ga...@us...> wrote:
> > Project "Postgres-XC".
> >
> > The branch, master has been updated
> >       via  49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit)
> >      from  87a62879ab3492e3dd37d00478ffa857639e2b85 (commit)
> >
> > - Log -----------------------------------------------------------------
> > commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae
> > Author: Abbas <abb...@en...>
> > Date:   Tue May 24 17:06:30 2011 +0500
> >
> >     This patch adds support for the following data types to be used as distribution key
> >
> >     INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR
> >     CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR
> >     FLOAT4, FLOAT8, NUMERIC, CASH
> >     ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, TIMETZ
> >
>
> I am not sure some of these data types are a good idea to use for
> distributing on. Float is inexact and seems problematic
>
> I just did a quick test:
>
> mds=# create table float1 (a float, b float) distribute by hash (a);
> CREATE TABLE
>
> mds=# insert into float1 values (2.0/3, 2);
> INSERT 0 1
>
> mds=# select * from float1;
>         a          | b
> -------------------+---
>  0.666666666666667 | 2
> (1 row)
>
> Then, I copy and paste the output of a:
>
> mds=# select * from float1 where a = 0.666666666666667;
>  a | b
> ---+---
> (0 rows)
>

Float is a tricky type. Leaving XC aside, this test case produces the same result in plain PostgreSQL, and for the same reason: the column does not actually contain 0.666666666666667; what psql shows us is only an approximation of what is stored there.
select * from float1 where a = 2.0/3; would, however, work.
Secondly, suppose we have the same test case with data type float4. Now both
select * from float1 where a = 0.666666666666667; and
select * from float1 where a = 2.0/3;
would return no rows, in both PG and XC. The reason is that PG treats real-number literals as float8 by default, and a float8 does not compare equal to a float4.
select * from float1 where a = cast (2.0/3 as float4);
would therefore work.
Any user willing to use float types has to be aware of these quirks, and, knowing them, may still benefit from being able to use float as a distribution key.

> Looking at the plan it tries to take advantage of partitioning:
>
> mds=# explain select * from float1 where a = 0.666666666666667;
>                             QUERY PLAN
> -------------------------------------------------------------------
>  Data Node Scan (Node Count [1])  (cost=0.00..0.00 rows=0 width=0)
> (1 row)
>
> I think we should remove support for floats as a possible distribution
> type; users may get themselves into trouble.
>
> There may be similar issues with the timestamp data types:
>
> mds=# create table timestamp1 (a timestamp, b int) distribute by hash(a);
> CREATE TABLE
> mds=# insert into timestamp1 values (now(), 1);
> INSERT 0 1
> mds=# select * from timestamp1;
>              a              | b
> ----------------------------+---
>  2011-05-24 08:51:21.597551 | 1
> (1 row)
>
> mds=# select * from timestamp1 where a = '2011-05-24 08:51:21.597551';
>  a | b
> ---+---
> (0 rows)
>
> As far as BOOL goes, I suppose it may be ok, but of course there are
> only two possible values. I would block it, or at the very least if
> the user leaves off the distribution clause, I would not consider BOOL
> columns and look at other columns as better partitioning candidates.
>
> In any event, I am very glad to see the various INT types, CHAR,
> VARCHAR, TEXT, NUMERIC and DATE supported. I am not so sure how useful
> some of the others are.
>
> Thanks,
>
> Mason
|
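Abbas's second point -- an unadorned real literal is taken as float8, and a stored float4 will not compare equal to it without the cast -- has a direct C analogue; a minimal sketch (illustrative only, not XC code):

    #include <stdio.h>

    int main(void)
    {
        float  stored  = 2.0f / 3.0f;   /* the float4 column value */
        double literal = 2.0 / 3.0;     /* a bare literal is float8 by default */

        /* The comparison promotes the float to double but keeps its lower
         * precision, so it differs from the full-precision double literal... */
        printf("float4 = float8 literal: %d\n", stored == literal);         /* 0 */

        /* ...whereas rounding the literal down to float4 first matches:
         * the C analogue of  a = cast(2.0/3 as float4).  */
        printf("float4 = cast literal:   %d\n", stored == (float) literal); /* 1 */
        return 0;
    }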
From: Mason <ma...@us...> - 2011-05-24 13:03:39
|
On Tue, May 24, 2011 at 8:08 AM, Abbas Butt <ga...@us...> wrote: > Project "Postgres-XC". > > The branch, master has been updated > via 49b66c77343ae1e9921118e0c902b1528f1cc2ae (commit) > from 87a62879ab3492e3dd37d00478ffa857639e2b85 (commit) > > > - Log ----------------------------------------------------------------- > commit 49b66c77343ae1e9921118e0c902b1528f1cc2ae > Author: Abbas <abb...@en...> > Date: Tue May 24 17:06:30 2011 +0500 > > This patch adds support for the following data types to be used as distribution key > > INT8, INT2, OID, INT4, BOOL, INT2VECTOR, OIDVECTOR > CHAR, NAME, TEXT, BPCHAR, BYTEA, VARCHAR > FLOAT4, FLOAT8, NUMERIC, CASH > ABSTIME, RELTIME, DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL, TIMETZ > I am not sure some of these data types are a good idea to use for distributing on. Float is inexact and seems problematic I just did a quick test: mds=# create table float1 (a float, b float) distribute by hash (a); CREATE TABLE mds=# insert into float1 values (2.0/3, 2); INSERT 0 1 mds=# select * from float1; a | b -------------------+--- 0.666666666666667 | 2 (1 row) Then, I copy and paste the output of a: mds=# select * from float1 where a = 0.666666666666667; a | b ---+--- (0 rows) Looking at the plan it tries to take advantage of partitioning: mds=# explain select * from float1 where a = 0.666666666666667; QUERY PLAN ------------------------------------------------------------------- Data Node Scan (Node Count [1]) (cost=0.00..0.00 rows=0 width=0) (1 row) I think we should remove support for floats as a possible distribution type; users may get themselves into trouble. There may be similar issues with the timestamp data types: mds=# create table timestamp1 (a timestamp, b int) distribute by hash(a); CREATE TABLE mds=# insert into timestamp1 values (now(), 1); INSERT 0 1 mds=# select * from timestamp1; a | b ----------------------------+--- 2011-05-24 08:51:21.597551 | 1 (1 row) mds=# select * from timestamp1 where a = '2011-05-24 08:51:21.597551'; a | b ---+--- (0 rows) As far as BOOL goes, I suppose it may be ok, but of course there are only two possible values. I would block it, or at the very least if the user leaves off the distribution clause, I would not consider BOOL columns and look at other columns as better partitioning candidates. In any event, I am very glad to see the various INT types, CHAR, VARCHAR, TEXT, NUMERIC and DATE supported. I am not so sure how useful some of the others are. Thanks, Mason |
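For readers following the thread: the "Data Node Scan (Node Count [1])" plan Mason quotes falls out of how hash distribution places rows. With equality against a constant, the planner can hash the constant and target the single node that could hold the row. A simplified sketch of that routing, assuming the model node = hash(key) % node_count; the hash and names (toy_hash, target_node) are illustrative, not XC's actual functions:

    #include <stdio.h>

    /* Toy string hash standing in for XC's hashing of the key datum. */
    static unsigned toy_hash(const char *key)
    {
        unsigned h = 5381;
        while (*key)
            h = h * 33u + (unsigned char) *key++;
        return h;
    }

    /* A row with this distribution key lives on exactly one node, so an
     * equality predicate on the key needs to visit only that node. */
    static int target_node(const char *key, unsigned node_count)
    {
        return (int) (toy_hash(key) % node_count);
    }

    int main(void)
    {
        const char *keys[] = { "42", "hello world", "2011-05-24" };
        for (int i = 0; i < 3; i++)
            printf("key '%s' -> node %d of 4\n", keys[i], target_node(keys[i], 4));
        return 0;
    }

This is also why inexact types make poor keys: if the value in the predicate is not bit-identical to the stored one, it may hash to the right node and still fail the equality check, or be routed elsewhere entirely.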
From: Pavan D. <pa...@us...> - 2011-05-24 12:12:40
|
Project "Postgres-XC". The branch, PGXC-TrialMaster has been updated via dd8e15cc5988aaa71b519724ab3d59e3e82f42e5 (commit) via df91a4341c34cfb5c63fec787e5602ac5e1bdc6d (commit) via 351a1751b7ee8c17d080fe0de9c9bef4bdbc653d (commit) via 7d2a58c8e4cafbcfe48741317813783603b8fb3f (commit) via b931435761b21f27aef8aca7e7e319bd0bee3a3a (commit) via ba0c6b460db82dbd38b5d2cb2d86c9ee36d3adc3 (commit) via 7a810c69a82a7d5990e922ee653b2301b1f91f2b (commit) via 11115542296b6b3eb7a6e9ec07cc4b3d87d44f87 (commit) via 632865878512957d7d65aec3ffb0e596c587f064 (commit) via d14afc6f37bf712e014059a687a02f609322d932 (commit) via d627d77cb86e4ce844b547bd88b5e2d060d6ed5a (commit) via 3d8241213435d7e6e1026de9ba9cf2ad8f6ff258 (commit) via 486318e5a5ed1299488d682ece3b08d40aa3629e (commit) via 52164f4446b11e3dfcb3d209ef527467f1fe9045 (commit) via 4d6a850dac9ba1c9cdf828fd07e739327a2aa878 (commit) via 57ff3183c379a4629b9d215731daf1bb84fdb36d (commit) via e389cedc177bd2f6c3192aa765477d9e9685b121 (commit) via 6c5bea9a0905bcb16a77249bfcc1978df0c571c1 (commit) via 86a985e81a831c78e02548033a63e685439c875e (commit) via 5373463dbee4a82c9080bcc21d1a734d112d165b (commit) via 0faf06811096886d28d5e91e8050b00d3bd8548a (commit) via 585d094996f822269d7e7c7eeab53dbb34369330 (commit) via 198c04c231615a9e81f274831f11bc2deca222e3 (commit) via ffd210505e1a331a8c6980a4032178a42e6f797a (commit) via 169a44eaa6ccca0ada3be43605e6a0c2ca4bd7e9 (commit) via f15bab9584259095b535beb6d47124808b26598c (commit) via 3d971742e7b1f74414603fa854416c7988d8d43c (commit) via c45032009fab9d2ca7c8f60d119d9084dfd18d98 (commit) via f81a5c58113391b7920238eedf713df0a7da36be (commit) via 6704a5b9e10344ee5accc99daf6accbf12fd5667 (commit) via 5f94f595e330101d47be4b6d0dec62cf3d5f2971 (commit) via c95a353c086776d54ea8c0fed8d89ff4628b2ed7 (commit) via 55948f23432759cc06299cd640d41a18bc7b6219 (commit) via f1841ef74cd880500114196f27e1adae59969665 (commit) via bb093447c90f611b55db8aafea10daa77c022f1d (commit) via 873b6c2937c9fc7ecec8943200521ac1fb5a4bd5 (commit) via 83fa5f092a16a54d3981acc6b9c487d4473d97dc (commit) via f400f82341a28b9bf8266ad829650b922a9df85d (commit) via 0fa0474cf15efd2cfea4ebeabf32f15aba13ecc1 (commit) via deb67bfdb6fa6e6b9a8e6984ac84ce77f2b93fe0 (commit) via d1e193d252c93c317674429bfe9a2119857cd2e9 (commit) via 1743b7631e870f97fe3b03d87a5c5bc34ab0a15c (commit) via 7b238dccef00c4f2dc86743dcacbac65e0b1b3e7 (commit) via 94f06fee218c0c36325deff3069acda920fbeba9 (commit) via e3ba278036f21aa3d512c7fa5087ce89767e0caf (commit) via 6650014f3bdfe7d7dc864fb05921d2f32f1918ab (commit) via e6ac8b0b1495941c0a5a82507235e1edbef0e884 (commit) via 81c026427de962fb825088814228fae7da0b71ed (commit) via 2eae75aeecd8532a922cc9195f63edc59931b90f (commit) via 59675139ae3c1dff0d1189f1a415d586bf0852f5 (commit) via b1db2304ea053effb9160bbaafbdd3256ff58497 (commit) via 100e7976889fcc29a674423b8dac9c9ffd5a64cb (commit) via 66cdb22ee68d07c2d7db94ed27903f311e568fdf (commit) via a335e0db6295f992bae8ea62be485a10ea6fdc6c (commit) via f9b53553de919e79edab5469989d1d23112da4e4 (commit) via d3cd801d005812ec499a91c03818c7f32abb4331 (commit) via ee06f1c1455b396332980c3b309f5e37280de706 (commit) via fbfe94557ad94095b40b79ca1a3b1a2e879e1418 (commit) via d2fe66518e3aa72196b44a0e8ebfa745e3e698b4 (commit) via 3fa6d176d5551c1e86e89b80e781b1f27e063a78 (commit) via d5835d5366c7abec7daca0d674159136cc0fb4b9 (commit) via db52bff897a1ce4b2a99266a4aed9b141eca3974 (commit) via f8fb49d57f04fd0e1930847340c1c0f7897d2438 (commit) via 23fa07e339ebbba8e7cf6d57538b12bfc17f66ed (commit) via 
7ba1c7cab398bc8e0cc55e8449558a843d940ad9 (commit) via 7fbd5823dfeace7cfa11d968691d6c3c351eccdd (commit) via ce7b5737c56440152b9b06db88a4a36eb0ae5425 (commit) via debc8202c2802adb4c16b51db0d7f80819137308 (commit) via 39a94aecfcb03ddff964aa84cf27fd28070a90c0 (commit) via 147d0b4a7d172d1f36c61f091c67a0c2efa15986 (commit) via 12a24a40765d64691ec2ac7d5dbcc561b3806b9f (commit) via 1e1dc8653b58622fd2531defc585facc114df0f5 (commit) via 7dccc3dca57147acbce13cd6721022ec94671e4b (commit) via 4f5cb7d0335b4cb6de1cbfe24eace05401c74128 (commit) via 9881d357467dd087c32843857c22a08c2f306550 (commit) via b944a8f1673433c65516b71457f17c1e69fa79b3 (commit) via 6dd5db3a89920c3a226f2aef2eb4bc332d5b1607 (commit) via 8345fd2a6c90ea4ae9363a9527260d69eb73d15f (commit) via aef28684d3a06fd6a47f11a3ad44b9db3cee8e22 (commit) via 6b28e34230944f89006b630ba77944f4cbe89a13 (commit) via 47b8ada14365c528f7069ae4e1972d0de64a8ae0 (commit) via f623dedea3c43a0ad2fc6790ce458e12820e2663 (commit) via d2e1f1e8ab97b6377026e1f9aaed3a1905b8fde7 (commit) via 6c3c74605bcb2f11556aa36e7894b90efaf8bd38 (commit) via 9b2d9869a824ee899a9c4893d78c175025b06e7a (commit) via 7d1401cc9cf8b398a59a2c8eb616ebfabf1d9fcb (commit) via 6d9651b0ac536c48eb2aae27dda7caed2520ae4e (commit) via 11f7f78e067b043e7cca85f4f4d6624c2b8994cc (commit) via 7fc7e783709b3854361e2d51cfd0a689ded0176f (commit) via 4399065fde1b20d31582ccad7256d3abd247e35c (commit) via 986115564b64fea51b427e07cf8d718cff8273cb (commit) via 73fe8a9d3c13ca13d85dfa86aa643e4da8b08649 (commit) via c1c1bb130168681174c8bc2c5ac1e6ba51ed0f9e (commit) via 45d885b0e00e4d58247bfcc4ffc0a2173f7353ee (commit) via f3d43f2194c9a52eb256f4ffcb164bd2f45b72ca (commit) via 30d5d5a9d9fcfe347d017d50aedf1b724224058f (commit) via 91c0375894a91fb5694bea26118b33542f33919a (commit) via bd5d82f011becbc06a363d4425ce3d9cc7992884 (commit) via 6967c5ad3698f67ba8ee437bbc23c392714b9d66 (commit) via 91b01409d3d99abcfb0ee09132a2d9e4d41ee923 (commit) via d8e138378d5f81fadc683149d49315dec94957a3 (commit) via 3054e8dc3d2a4b08e147f7433c6e5ee465bce6bd (commit) via 4fab03b86306594c1e43be0d8beb35889a7da59a (commit) via ad968a480b13dbaaed744bb49042ede1d746a797 (commit) via b05b69d335ff22bac802782a16183b9fc97dab12 (commit) via 3a13cf00e625ebab6518dfbe545549225e72e5ad (commit) via 03ede3b6648742aa25378a7da57c94dfd60e7f94 (commit) via 296a4804b2c2d75e904ce620071b6aa1747f18f5 (commit) via d4e2e55ecbcae1b3e2797d50c310bd5f3f1219fb (commit) via c056288a515a3c320152b64e31d841b49e241a5e (commit) via 94ea17b04ef17529a66de9fe951c2095c4ff1aae (commit) via 3970d69b53711f0575683343c869e5e6dfd84c5c (commit) via 71ba7f047af5d7dbdbb5287146802b07b5970d82 (commit) via 3ea4ee8f50c165af648cf7d7c8ff71a5b81d2d50 (commit) via 54302b0b32f35c34ad29e58ac9aa163ff4cd4d08 (commit) via 1c94e275e7b0add9545fcd5a3efe7ca175f22747 (commit) via 8180e6e744070aa983041caad246ae8d29f7f9e8 (commit) via 62d3e5a87be00d3998fddc357a7fad9acd90e061 (commit) via 8fd6387446eadfea0a406ac0ab7f41fa89e2eb84 (commit) via 715edf7186ba9eac3b573714cba8801212e6bb52 (commit) via 95ac89486dec6f31ed0840ed7167349f4273c243 (commit) via 60ccc433a461862a6e8d8c1c88a60c7c78068be0 (commit) via 6cecdfd3b30e09fcabddfd00d3086b9f3de62cbc (commit) via 5de13fab3f9d7986534edbb08f6488124990f45f (commit) via b265af4375d1868ef2b199c536b2fa679e35539d (commit) via 05384c6a05e81cbacfde52605672a2d87acbcd6a (commit) via e4e773b5a1b5f731f293c3b8a481df18829ce27f (commit) via dbd5de39c35a19e086e4680271674dbaa4141b67 (commit) via 56648b6ea8d0f36b33336fc6f36a4be9da2f07d2 (commit) via 8a3fec65c055440b594468c98dfae68e2e9d99ef 
(commit) via 6c9638d7f6a65affb70813ee74220744c4e18d56 (commit) via 7bc7601b37435d81b945fd89d71e8a3b1ccea05f (commit) via 50197c86bd6223364ed17cb8e26d68bc3829bddf (commit) via 6433e94cae17fe65a317e2d3285cbadea0900164 (commit) via b2a59774c29aab0fd57ee5e55bd910ced7fc59bf (commit) via 00ae1abffcb95c547d86fa8131af7aeebb1de05a (commit) via 9a7116eeea0a3ce1fae43c67448d80a0bfaae352 (commit) via 0a8769638a6ab9aea6bfb25b4d1a073511df1d4a (commit) via 6f6a4d6e2adfc21ce374f83887272f5c524f1a34 (commit) via da6c8b453d35630d21cb6e965a9f80e0e409283d (commit) via 6763c5088b9edcd9811fe23eb22599d5515307d3 (commit) via d6374c413c0bb0fea6b2e1537e742e2ae137ead0 (commit) via 0fb29e5111a2de924e5839e8ea84979e4105f5f4 (commit) via 7fa926a961fb04ef1dd28dc6c460530007d6c2c3 (commit) via 96d31539572e3a2cf670862a538cd2bc514fd876 (commit) via 83eda6d7ea0bcfa8f94bf7a607ecf5afdc0c390c (commit) via 1ba8046dc870ba2c8cf56f4a6d2ac7509cc00dc2 (commit) via 041b7e3253263be70d8bb36fe58d2345b81d7772 (commit) via 5910df676a86ce99231d385e03652fd2d2867666 (commit) via 2a37acfee98d8c711ce6d35e20a736db425500bb (commit) via 4b7399b7722c3eabdb247bf61d7a22ff9e237743 (commit) via c68e8b698c6ba20c57a7525d12fd4c93a6d7cefe (commit) via 6549ab4bddf29ae58bd06443f2701f454276efeb (commit) via a40049bcdcfa96c97e49eeba5598f8a18bb2d5d3 (commit) via bae7c55d569be3185bc940d2c6ca7b54ea32093b (commit) via cbd4f1b71dd935f3459e84678218870bc8bd6ab8 (commit) from 07a3dce533cd254d470ea1fbacfc1fe92d42e40d (commit) - Log ----------------------------------------------------------------- commit dd8e15cc5988aaa71b519724ab3d59e3e82f42e5 Author: Pavan Deolasee <pav...@gm...> Date: Tue May 24 16:58:38 2011 +0530 Fix a merge issue introduced while picking PGXC-commits post 9.0.3 merge from the master branch diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c index 5a3945e..99ecd14 100644 --- a/src/backend/parser/parse_func.c +++ b/src/backend/parser/parse_func.c @@ -1581,55 +1581,6 @@ LookupAggNameTypeNames(List *aggname, List *argtypes, bool noError) return oid; } -/* - * pg_get_expr() is a system function that exposes the expression - * deparsing functionality in ruleutils.c to users. Very handy, but it was - * later realized that the functions in ruleutils.c don't check the input - * rigorously, assuming it to come from system catalogs and to therefore - * be valid. That makes it easy for a user to crash the backend by passing - * a maliciously crafted string representation of an expression to - * pg_get_expr(). - * - * There's a lot of code in ruleutils.c, so it's not feasible to add - * water-proof input checking after the fact. Even if we did it once, it - * would need to be taken into account in any future patches too. - * - * Instead, we restrict pg_rule_expr() to only allow input from system - * catalogs. This is a hack, but it's the most robust and easiest - * to backpatch way of plugging the vulnerability. - * - * This is transparent to the typical usage pattern of - * "pg_get_expr(systemcolumn, ...)", but will break "pg_get_expr('foo', - * ...)", even if 'foo' is a valid expression fetched earlier from a - * system catalog. Hopefully there aren't many clients doing that out there. 
- */ -void -check_pg_get_expr_args(ParseState *pstate, Oid fnoid, List *args) -{ - Node *arg; - - /* if not being called for pg_get_expr, do nothing */ - if (fnoid != F_PG_GET_EXPR && fnoid != F_PG_GET_EXPR_EXT) - return; - - /* superusers are allowed to call it anyway (dubious) */ - if (superuser()) - return; - - /* - * The first argument must be a Var referencing one of the allowed - * system-catalog columns. It could be a join alias Var or subquery - * reference Var, though, so we need a recursive subroutine to chase - * through those possibilities. - */ - Assert(list_length(args) > 1); - arg = (Node *) linitial(args); - - if (!check_pg_get_expr_arg(pstate, arg, 0)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("argument to pg_get_expr() must come from system catalogs"))); -} #ifdef PGXC /* @@ -1726,85 +1677,3 @@ IsParseFuncImmutable(ParseState *pstate, List *targs, List *funcname, bool func_ } #endif -static bool -check_pg_get_expr_arg(ParseState *pstate, Node *arg, int netlevelsup) -{ - if (arg && IsA(arg, Var)) - { - Var *var = (Var *) arg; - RangeTblEntry *rte; - AttrNumber attnum; - - netlevelsup += var->varlevelsup; - rte = GetRTEByRangeTablePosn(pstate, var->varno, netlevelsup); - attnum = var->varattno; - - if (rte->rtekind == RTE_JOIN) - { - /* Recursively examine join alias variable */ - if (attnum > 0 && - attnum <= list_length(rte->joinaliasvars)) - { - arg = (Node *) list_nth(rte->joinaliasvars, attnum - 1); - return check_pg_get_expr_arg(pstate, arg, netlevelsup); - } - } - else if (rte->rtekind == RTE_SUBQUERY) - { - /* Subselect-in-FROM: examine sub-select's output expr */ - TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, - attnum); - ParseState mypstate; - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - arg = (Node *) ste->expr; - - /* - * Recurse into the sub-select to see what its expr refers to. - * We have to build an additional level of ParseState to keep in - * step with varlevelsup in the subselect. - */ - MemSet(&mypstate, 0, sizeof(mypstate)); - mypstate.parentParseState = pstate; - mypstate.p_rtable = rte->subquery->rtable; - /* don't bother filling the rest of the fake pstate */ - - return check_pg_get_expr_arg(&mypstate, arg, 0); - } - else if (rte->rtekind == RTE_RELATION) - { - switch (rte->relid) - { - case IndexRelationId: - if (attnum == Anum_pg_index_indexprs || - attnum == Anum_pg_index_indpred) - return true; - break; - - case AttrDefaultRelationId: - if (attnum == Anum_pg_attrdef_adbin) - return true; - break; - - case ProcedureRelationId: - if (attnum == Anum_pg_proc_proargdefaults) - return true; - break; - - case ConstraintRelationId: - if (attnum == Anum_pg_constraint_conbin) - return true; - break; - - case TypeRelationId: - if (attnum == Anum_pg_type_typdefaultbin) - return true; - break; - } - } - } - - return false; -} commit df91a4341c34cfb5c63fec787e5602ac5e1bdc6d Author: Ashutosh Bapat <ash...@en...> Date: Thu May 19 14:45:02 2011 +0530 While copying the message from datanode to a slot, copy it within the memory context of the slot. Fix some compiler warnings. 
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 9f6adff..87302b4 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -466,64 +466,6 @@ ExecStoreMinimalTuple(MinimalTuple mtup, return slot; } -#ifdef PGXC -/* -------------------------------- - * ExecStoreDataRowTuple - * - * Store a buffer in DataRow message format into the slot. - * - * -------------------------------- - */ -TupleTableSlot * -ExecStoreDataRowTuple(char *msg, size_t len, int node, TupleTableSlot *slot, - bool shouldFree) -{ - /* - * sanity checks - */ - Assert(msg != NULL); - Assert(len > 0); - Assert(slot != NULL); - Assert(slot->tts_tupleDescriptor != NULL); - - /* - * Free any old physical tuple belonging to the slot. - */ - if (slot->tts_shouldFree) - heap_freetuple(slot->tts_tuple); - if (slot->tts_shouldFreeMin) - heap_free_minimal_tuple(slot->tts_mintuple); - if (slot->tts_shouldFreeRow) - pfree(slot->tts_dataRow); - - /* - * Drop the pin on the referenced buffer, if there is one. - */ - if (BufferIsValid(slot->tts_buffer)) - ReleaseBuffer(slot->tts_buffer); - - slot->tts_buffer = InvalidBuffer; - - /* - * Store the new tuple into the specified slot. - */ - slot->tts_isempty = false; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = false; - slot->tts_shouldFreeRow = shouldFree; - slot->tts_tuple = NULL; - slot->tts_mintuple = NULL; - slot->tts_dataRow = msg; - slot->tts_dataLen = len; - slot->tts_dataNode = node; - - /* Mark extracted state invalid */ - slot->tts_nvalid = 0; - - return slot; -} -#endif - /* -------------------------------- * ExecClearTuple * @@ -1416,3 +1358,68 @@ end_tup_output(TupOutputState *tstate) ExecDropSingleTupleTableSlot(tstate->slot); pfree(tstate); } + +#ifdef PGXC +/* -------------------------------- + * ExecStoreDataRowTuple + * + * Store a buffer in DataRow message format into the slot. + * + * -------------------------------- + */ +TupleTableSlot * +ExecStoreDataRowTuple(char *msg, size_t len, int node, TupleTableSlot *slot, + bool shouldFree) +{ + /* + * sanity checks + */ + Assert(msg != NULL); + Assert(len > 0); + Assert(slot != NULL); + Assert(slot->tts_tupleDescriptor != NULL); + + /* + * Free any old physical tuple belonging to the slot. + */ + if (slot->tts_shouldFree) + heap_freetuple(slot->tts_tuple); + if (slot->tts_shouldFreeMin) + heap_free_minimal_tuple(slot->tts_mintuple); + /* + * if msg == slot->tts_dataRow then we would + * free the dataRow in the slot loosing the contents in msg. It is safe + * to reset shouldFreeRow, since it will be overwritten just below. + */ + if (msg == slot->tts_dataRow) + slot->tts_shouldFreeRow = false; + if (slot->tts_shouldFreeRow) + pfree(slot->tts_dataRow); + + /* + * Drop the pin on the referenced buffer, if there is one. + */ + if (BufferIsValid(slot->tts_buffer)) + ReleaseBuffer(slot->tts_buffer); + + slot->tts_buffer = InvalidBuffer; + + /* + * Store the new tuple into the specified slot. 
+ */ + slot->tts_isempty = false; + slot->tts_shouldFree = false; + slot->tts_shouldFreeMin = false; + slot->tts_shouldFreeRow = shouldFree; + slot->tts_tuple = NULL; + slot->tts_mintuple = NULL; + slot->tts_dataRow = msg; + slot->tts_dataLen = len; + slot->tts_dataNode = node; + + /* Mark extracted state invalid */ + slot->tts_nvalid = 0; + + return slot; +} +#endif diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c index 99b05ed..335c05f 100644 --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@ -49,7 +49,7 @@ #define PRIMARY_NODE_WRITEAHEAD 1024 * 1024 static bool autocommit = true; -static is_ddl = false; +static bool is_ddl = false; static bool implicit_force_autocommit = false; static PGXCNodeHandle **write_node_list = NULL; static int write_node_count = 0; @@ -420,7 +420,6 @@ create_tuple_desc(char *msg_body, size_t len) char *typname; Oid oidtypeid; int32 typemode, typmod; - uint32 n32; attnum = (AttrNumber) i; @@ -1152,6 +1151,27 @@ BufferConnection(PGXCNodeHandle *conn) } /* + * copy the datarow from combiner to the given slot, in the slot's memory + * context + */ +static void +CopyDataRowTupleToSlot(RemoteQueryState *combiner, TupleTableSlot *slot) +{ + char *msg; + MemoryContext oldcontext; + oldcontext = MemoryContextSwitchTo(slot->tts_mcxt); + msg = (char *)palloc(combiner->currentRow.msglen); + memcpy(msg, combiner->currentRow.msg, combiner->currentRow.msglen); + ExecStoreDataRowTuple(msg, combiner->currentRow.msglen, + combiner->currentRow.msgnode, slot, true); + pfree(combiner->currentRow.msg); + combiner->currentRow.msg = NULL; + combiner->currentRow.msglen = 0; + combiner->currentRow.msgnode = 0; + MemoryContextSwitchTo(oldcontext); +} + +/* * Get next data row from the combiner's buffer into provided slot * Just clear slot and return false if buffer is empty, that means end of result * set is reached @@ -1164,12 +1184,7 @@ FetchTuple(RemoteQueryState *combiner, TupleTableSlot *slot) /* If we have message in the buffer, consume it */ if (combiner->currentRow.msg) { - ExecStoreDataRowTuple(combiner->currentRow.msg, - combiner->currentRow.msglen, - combiner->currentRow.msgnode, slot, true); - combiner->currentRow.msg = NULL; - combiner->currentRow.msglen = 0; - combiner->currentRow.msgnode = 0; + CopyDataRowTupleToSlot(combiner, slot); have_tuple = true; } @@ -1189,6 +1204,10 @@ FetchTuple(RemoteQueryState *combiner, TupleTableSlot *slot) * completed. Afterwards rows will be taken from the buffer bypassing * currentRow until buffer is empty, and only after that data are read * from a connection. + * PGXCTODO: the message should be allocated in the same memory context as + * that of the slot. Are we sure of that in the call to + * ExecStoreDataRowTuple below? If one fixes this memory issue, please + * consider using CopyDataRowTupleToSlot() for the same. 
*/ if (list_length(combiner->rowBuffer) > 0) { @@ -1279,12 +1298,7 @@ FetchTuple(RemoteQueryState *combiner, TupleTableSlot *slot) /* If we have message in the buffer, consume it */ if (combiner->currentRow.msg) { - ExecStoreDataRowTuple(combiner->currentRow.msg, - combiner->currentRow.msglen, - combiner->currentRow.msgnode, slot, true); - combiner->currentRow.msg = NULL; - combiner->currentRow.msglen = 0; - combiner->currentRow.msgnode = 0; + CopyDataRowTupleToSlot(combiner, slot); have_tuple = true; } @@ -3762,7 +3776,7 @@ handle_results: natts = resultslot->tts_tupleDescriptor->natts; for (i = 0; i < natts; ++i) { - if (resultslot->tts_values[i] == NULL) + if (resultslot->tts_values[i] == (Datum) NULL) return NULL; } commit 351a1751b7ee8c17d080fe0de9c9bef4bdbc653d Author: Michael P <mic...@us...> Date: Wed May 11 18:29:55 2011 +0900 Support for single-prepared PL/PGSQL functions This commit fixes primarily problems like in bug 3138450 (cache lookup for type 0) where XC was not able to set up plpgsql parameter values because values were not correctly fetched. This commit does not yet solve the special case of multiple uses of same plpgsql datum within a SQL command. PL/PGSQL functions using subqueries are out of scope for the moment due to XC's restrictions regarding multi-prepared statements. diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c index a8a1070..99b05ed 100644 --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@ -4057,17 +4057,45 @@ ParamListToDataRow(ParamListInfo params, char** result) StringInfoData buf; uint16 n16; int i; + int real_num_params = params->numParams; + + /* + * It is necessary to fetch parameters + * before looking at the output value. + */ + for (i = 0; i < params->numParams; i++) + { + ParamExternData *param; + + param = ¶ms->params[i]; + + if (!OidIsValid(param->ptype) && params->paramFetch != NULL) + (*params->paramFetch) (params, i + 1); + + /* + * In case parameter type is not defined, it is not necessary to include + * it in message sent to backend nodes. + */ + if (!OidIsValid(param->ptype)) + real_num_params--; + } initStringInfo(&buf); + /* Number of parameter values */ - n16 = htons(params->numParams); + n16 = htons(real_num_params); appendBinaryStringInfo(&buf, (char *) &n16, 2); /* Parameter values */ for (i = 0; i < params->numParams; i++) { - ParamExternData *param = params->params + i; + ParamExternData *param = ¶ms->params[i]; uint32 n32; + + /* If parameter has no type defined it is not necessary to include it in message */ + if (!OidIsValid(param->ptype)) + continue; + if (param->isnull) { n32 = htonl(-1); commit 7d2a58c8e4cafbcfe48741317813783603b8fb3f Author: Abbas <abb...@en...> Date: Wed May 4 13:26:10 2011 +0500 This patch fixes a problem in XC that INSERTS/UPDATES in catalog tables were not possible from psql prompt. The problem was in XC planner. XC planner should first check if all the tables in the query are catalog tables then it should invoke standard plannner. This change enables us to remove a temp fix in GetRelationLocInfo. Also a query is added in system_views.sql to add a corresponding entry in pgxc_class. RelationBuildDesc is asked to include bootstrap objetcs too while building location info. 
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index 2edaf48..083a6d8 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -155,6 +155,8 @@ CREATE SCHEMA __pgxc_datanode_schema__; create table __pgxc_coordinator_schema__.pg_prepared_xacts ( transaction xid, gid text, prepared timestamptz, owner name, database name ); +INSERT INTO pgxc_class VALUES((SELECT oid FROM pg_class WHERE relkind = 'r' AND relname = 'pg_prepared_xacts'), 'N', 0,0,0); + CREATE VIEW __pgxc_datanode_schema__.pg_prepared_xacts AS SELECT P.transaction, P.gid, P.prepared, U.rolname AS owner, D.datname AS database diff --git a/src/backend/pgxc/locator/locator.c b/src/backend/pgxc/locator/locator.c index 4116476..1eff17c 100644 --- a/src/backend/pgxc/locator/locator.c +++ b/src/backend/pgxc/locator/locator.c @@ -754,37 +754,6 @@ GetRelationLocInfo(Oid relid) Relation rel = relation_open(relid, AccessShareLock); - /* This check has been added as a temp fix for CREATE TABLE not adding entry in pgxc_class - * when run from system_views.sql - */ - if ( rel != NULL && - rel->rd_rel != NULL && - rel->rd_rel->relkind == RELKIND_RELATION && - rel->rd_rel->relname.data != NULL && - (strcmp(rel->rd_rel->relname.data, PREPARED_XACTS_TABLE) == 0) ) - { - namespace = get_namespace_name(rel->rd_rel->relnamespace); - - if (namespace != NULL && (strcmp(namespace, PGXC_COORDINATOR_SCHEMA) == 0)) - { - RelationLocInfo *dest_info; - - dest_info = (RelationLocInfo *) palloc0(sizeof(RelationLocInfo)); - - dest_info->relid = relid; - dest_info->locatorType = 'N'; - dest_info->nodeCount = NumDataNodes; - dest_info->nodeList = GetAllDataNodes(); - - relation_close(rel, AccessShareLock); - pfree(namespace); - - return dest_info; - } - - if (namespace != NULL) pfree(namespace); - } - if (rel && rel->rd_locator_info) ret_loc_info = CopyRelationLocInfo(rel->rd_locator_info); diff --git a/src/backend/pgxc/plan/planner.c b/src/backend/pgxc/plan/planner.c index 8f24bbe..2da079f 100644 --- a/src/backend/pgxc/plan/planner.c +++ b/src/backend/pgxc/plan/planner.c @@ -2895,6 +2895,12 @@ pgxc_planner(Query *query, int cursorOptions, ParamListInfo boundParams) if (query->commandType != CMD_SELECT) result->resultRelations = list_make1_int(query->resultRelation); + if (contains_only_pg_catalog (query->rtable)) + { + result = standard_planner(query, cursorOptions, boundParams); + return result; + } + if (query_step->exec_nodes == NULL) get_plan_nodes_command(query_step, root); diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index a2a1d4d..b79e41a 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -889,7 +889,7 @@ RelationBuildDesc(Oid targetRelId, bool insertIt) relation->trigdesc = NULL; #ifdef PGXC - if (IS_PGXC_COORDINATOR && relation->rd_id >= FirstNormalObjectId) + if (IS_PGXC_COORDINATOR && relation->rd_id >= FirstBootstrapObjectId) RelationBuildLocator(relation); #endif /* commit b931435761b21f27aef8aca7e7e319bd0bee3a3a Author: Abbas <abb...@en...> Date: Tue May 3 22:29:51 2011 +0500 This patch makes the group by on XC work. The changes are as follows 1. The application of final function at coordinator is enabled though AggState execution. Till now final function was being applied during execution of RemoteQuery only, if there were aggregates in target list of remote query. This only worked in certain cases of aggregates (expressions involving aggregates, aggregation of join results etc. 
being some of the exceptions). With this change the way grouping works the same way as PG except a. the data comes from remote nodes in the form of tuples b. the aggregates go through three steps transition, collection (extra step to collect the data across the nodes) and finalization. 2. Till now, the collection and transition result type for some aggregates like sum, count, regr_count were different. I have added a function int8_sum__to_int8() which adds to int8 datums and converts the result into int8 datum. This function is used as collection function for these aggregates so that collection and transition functions have same result types. 3. Changed some of the alternate outputs to correct results now that grouping is working. Commented out test join, since it's crashing with grouping enabled. The test has a query which involves aggregates, group by and order by. The crash is happening because of order by and aggregates. Earlier the test didn't crash since GROUPING as disabled and the query would throw error, but now with grouping is enabled, the crash occurs. Bug id 3284321 tracks the crash. 4. Added new test xc_groupby.sql to test the grouping in XC with round robin and replicated tables with some simple aggregates like sum, count and avg. All work done by Ashutosh Bapat. diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index be2007b..437bf20 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -89,6 +89,7 @@ #include "optimizer/tlist.h" #include "parser/parse_agg.h" #include "parser/parse_coerce.h" +#include "pgxc/pgxc.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/lsyscache.h" @@ -121,6 +122,9 @@ typedef struct AggStatePerAggData /* Oids of transfer functions */ Oid transfn_oid; Oid finalfn_oid; /* may be InvalidOid */ +#ifdef PGXC + Oid collectfn_oid; /* may be InvalidOid */ +#endif /* PGXC */ /* * fmgr lookup data for transfer functions --- only valid when @@ -129,6 +133,9 @@ typedef struct AggStatePerAggData */ FmgrInfo transfn; FmgrInfo finalfn; +#ifdef PGXC + FmgrInfo collectfn; +#endif /* PGXC */ /* number of sorting columns */ int numSortCols; @@ -154,6 +161,10 @@ typedef struct AggStatePerAggData */ Datum initValue; bool initValueIsNull; +#ifdef PGXC + Datum initCollectValue; + bool initCollectValueIsNull; +#endif /* PGXC */ /* * We need the len and byval info for the agg's input, result, and @@ -165,9 +176,15 @@ typedef struct AggStatePerAggData int16 inputtypeLen, resulttypeLen, transtypeLen; +#ifdef PGXC + int16 collecttypeLen; +#endif /* PGXC */ bool inputtypeByVal, resulttypeByVal, transtypeByVal; +#ifdef PGXC + bool collecttypeByVal; +#endif /* PGXC */ /* * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but @@ -725,6 +742,55 @@ finalize_aggregate(AggState *aggstate, MemoryContext oldContext; oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory); +#ifdef PGXC + /* + * PGXCTODO: see PGXCTODO item in advance_collect_function + * this step is needed in case the transition function does not produce + * result consumable by final function and need collection function to be + * applied on transition function results. Usually results by both functions + * should be consumable by final function. + * As such this step is meant only to convert transition results into form + * consumable by final function, the step does not actually do any + * collection. 
+ */ + if (OidIsValid(peraggstate->collectfn_oid)) + { + FunctionCallInfoData fcinfo; + InitFunctionCallInfoData(fcinfo, &(peraggstate->collectfn), 2, + (void *) aggstate, NULL); + /* + * copy the initial datum since it might get changed inside the + * collection function + */ + if (peraggstate->initCollectValueIsNull) + fcinfo.arg[0] = peraggstate->initCollectValue; + else + fcinfo.arg[0] = datumCopy(peraggstate->initCollectValue, + peraggstate->collecttypeByVal, + peraggstate->collecttypeLen); + fcinfo.argnull[0] = peraggstate->initCollectValueIsNull; + fcinfo.arg[1] = pergroupstate->transValue; + fcinfo.argnull[1] = pergroupstate->transValueIsNull; + if (fcinfo.flinfo->fn_strict && + (pergroupstate->transValueIsNull || peraggstate->initCollectValueIsNull)) + { + pergroupstate->transValue = (Datum)0; + pergroupstate->transValueIsNull = true; + } + else + { + Datum newVal = FunctionCallInvoke(&fcinfo); + + /* + * set the result of collection function to the transValue so that code + * below invoking final function does not change + */ + /* PGXCTODO: worry about the memory management here? */ + pergroupstate->transValue = newVal; + pergroupstate->transValueIsNull = fcinfo.isnull; + } + } +#endif /* PGXC */ /* * Apply the agg's finalfn if one is provided, else return transValue. @@ -1546,6 +1612,10 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) AclResult aclresult; Oid transfn_oid, finalfn_oid; +#ifdef PGXC + Oid collectfn_oid; + Expr *collectfnexpr; +#endif /* PGXC */ Expr *transfnexpr, *finalfnexpr; Datum textInitVal; @@ -1612,13 +1682,19 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) get_func_name(aggref->aggfnoid)); peraggstate->transfn_oid = transfn_oid = aggform->aggtransfn; -#ifdef PGXC - /* For PGXC final function is executed when combining, disable it here */ - peraggstate->finalfn_oid = finalfn_oid = InvalidOid; -#else peraggstate->finalfn_oid = finalfn_oid = aggform->aggfinalfn; -#endif - +#ifdef PGXC + peraggstate->collectfn_oid = collectfn_oid = aggform->aggcollectfn; + /* + * For PGXC final and collection functions are used to combine results at coordinator, + * disable those for data node + */ + if (IS_PGXC_DATANODE) + { + peraggstate->finalfn_oid = finalfn_oid = InvalidOid; + peraggstate->collectfn_oid = collectfn_oid = InvalidOid; + } +#endif /* PGXC */ /* Check that aggregate owner has permission to call component fns */ { HeapTuple procTuple; @@ -1645,6 +1721,17 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aclcheck_error(aclresult, ACL_KIND_PROC, get_func_name(finalfn_oid)); } + +#ifdef PGXC + if (OidIsValid(collectfn_oid)) + { + aclresult = pg_proc_aclcheck(collectfn_oid, aggOwner, + ACL_EXECUTE); + if (aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, ACL_KIND_PROC, + get_func_name(collectfn_oid)); + } +#endif /* PGXC */ } /* resolve actual type of transition state, if polymorphic */ @@ -1675,6 +1762,32 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) finalfn_oid, &transfnexpr, &finalfnexpr); +#ifdef PGXC + if (OidIsValid(collectfn_oid)) + { + /* we expect final function expression to be NULL in call to + * build_aggregate_fnexprs below, since InvalidOid is passed for + * finalfn_oid argument. Use a dummy expression to accept that. + */ + Expr *dummyexpr; + /* + * for XC, we need to setup the collection function expression as well. + * Use the same function with invalid final function oid, and collection + * function information instead of transition function information. 
+ * PGXCTODO: we should really be adding this step inside + * build_aggregate_fnexprs() but this way it becomes easy to merge. + */ + build_aggregate_fnexprs(&aggform->aggtranstype, + 1, + aggform->aggcollecttype, + aggref->aggtype, + collectfn_oid, + InvalidOid, + &collectfnexpr, + &dummyexpr); + Assert(!dummyexpr); + } +#endif /* PGXC */ fmgr_info(transfn_oid, &peraggstate->transfn); peraggstate->transfn.fn_expr = (Node *) transfnexpr; @@ -1685,12 +1798,25 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) peraggstate->finalfn.fn_expr = (Node *) finalfnexpr; } +#ifdef PGXC + if (OidIsValid(collectfn_oid)) + { + fmgr_info(collectfn_oid, &peraggstate->collectfn); + peraggstate->collectfn.fn_expr = (Node *)collectfnexpr; + } +#endif /* PGXC */ + get_typlenbyval(aggref->aggtype, &peraggstate->resulttypeLen, &peraggstate->resulttypeByVal); get_typlenbyval(aggtranstype, &peraggstate->transtypeLen, &peraggstate->transtypeByVal); +#ifdef PGXC + get_typlenbyval(aggform->aggcollecttype, + &peraggstate->collecttypeLen, + &peraggstate->collecttypeByVal); +#endif /* PGXC */ /* * initval is potentially null, so don't try to access it as a struct @@ -1706,6 +1832,23 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) peraggstate->initValue = GetAggInitVal(textInitVal, aggtranstype); +#ifdef PGXC + /* + * initval for collection function is potentially null, so don't try to + * access it as a struct field. Must do it the hard way with + * SysCacheGetAttr. + */ + textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple, + Anum_pg_aggregate_agginitcollect, + &peraggstate->initCollectValueIsNull); + + if (peraggstate->initCollectValueIsNull) + peraggstate->initCollectValue = (Datum) 0; + else + peraggstate->initCollectValue = GetAggInitVal(textInitVal, + aggform->aggcollecttype); +#endif /* PGXC */ + /* * If the transfn is strict and the initval is NULL, make sure input * type and transtype are the same (or at least binary-compatible), so diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index 7bc9a11..ad6c6f5 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -291,12 +291,6 @@ query_planner(PlannerInfo *root, List *tlist, { List *groupExprs; -#ifdef PGXC - ereport(ERROR, - (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), - (errmsg("GROUP BY clause is not yet supported")))); -#endif - groupExprs = get_sortgrouplist_exprs(parse->groupClause, parse->targetList); *num_groups = estimate_num_groups(root, diff --git a/src/backend/pgxc/plan/planner.c b/src/backend/pgxc/plan/planner.c index d386ded..8f24bbe 100644 --- a/src/backend/pgxc/plan/planner.c +++ b/src/backend/pgxc/plan/planner.c @@ -2977,17 +2977,16 @@ pgxc_planner(Query *query, int cursorOptions, ParamListInfo boundParams) } /* - * Use standard plan if we have more than one data node with either - * group by, hasWindowFuncs, or hasRecursive - */ - /* * PGXCTODO - this could be improved to check if the first * group by expression is the partitioning column, in which * case it is ok to treat as a single step. + * PGXCTODO - whatever number of nodes involved in the query, grouping, + * windowing and recursive queries take place at the coordinator. The + * corresponding planner should be able to optimize the queries such that + * most of the query is pushed to datanode, based on the kind of + * distribution the table has. 
*/ if (query->commandType == CMD_SELECT - && query_step->exec_nodes - && list_length(query_step->exec_nodes->nodelist) > 1 && (query->groupClause || query->hasWindowFuncs || query->hasRecursive)) { result = standard_planner(query, cursorOptions, boundParams); diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c index 61a6263..a8a1070 100644 --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@ -337,6 +337,14 @@ advance_collect_function(SimpleAgg *simple_agg, FunctionCallInfoData *fcinfo) * result has not been initialized * We must copy the datum into result if it is pass-by-ref. We * do not need to pfree the old result, since it's NULL. + * PGXCTODO: in case the transition result type is different from + * collection result type, this code would not work, since we are + * assigning datum of one type to another. For this code to work the + * input and output of collection function needs to be binary + * compatible which is not. So, either check in AggregateCreate, + * that the input and output of collection function are binary + * coercible or set the initial values something non-null or change + * this code */ simple_agg->collectValue = datumCopy(fcinfo->arg[1], simple_agg->transtypeByVal, diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index a36fb63..754da6e 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -2795,6 +2795,36 @@ int8_sum(PG_FUNCTION_ARGS) NumericGetDatum(oldsum), newval)); } +/* + * similar to int8_sum, except that the result is casted into int8 + */ +Datum +int8_sum_to_int8(PG_FUNCTION_ARGS) +{ + Datum result_num; + Datum numeric_arg; + + /* if both arguments are null, the result is null */ + if (PG_ARGISNULL(0) && PG_ARGISNULL(1)) + PG_RETURN_NULL(); + + /* if either of them is null, the other is the result */ + if (PG_ARGISNULL(0)) + PG_RETURN_DATUM(PG_GETARG_DATUM(1)); + + if (PG_ARGISNULL(1)) + PG_RETURN_DATUM(PG_GETARG_DATUM(0)); + + /* + * convert the first argument to numeric (second one is converted into + * numeric) + * add both the arguments using int8_sum + * convert the result into int8 using numeric_int8 + */ + numeric_arg = DirectFunctionCall1(int8_numeric, PG_GETARG_DATUM(0)); + result_num = DirectFunctionCall2(int8_sum, numeric_arg, PG_GETARG_DATUM(1)); + PG_RETURN_DATUM(DirectFunctionCall1(numeric_int8, result_num)); +} /* * Routines for avg(int2) and avg(int4). 
The transition datatype diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h index 443b135..57b0b71 100644 --- a/src/include/catalog/pg_aggregate.h +++ b/src/include/catalog/pg_aggregate.h @@ -130,14 +130,14 @@ DATA(insert ( 2106 interval_accum interval_avg 0 1187 "{0 second,0 second}" )); /* sum */ #ifdef PGXC -DATA(insert ( 2107 int8_sum numeric_add - 0 1700 1700 _null_ _null_ )); -DATA(insert ( 2108 int4_sum int8_sum 1779 0 20 1700 _null_ _null_ )); -DATA(insert ( 2109 int2_sum int8_sum 1779 0 20 1700 _null_ _null_ )); -DATA(insert ( 2110 float4pl float4pl - 0 700 700 _null_ _null_ )); -DATA(insert ( 2111 float8pl float8pl - 0 701 701 _null_ _null_ )); +DATA(insert ( 2107 int8_sum numeric_add - 0 1700 1700 _null_ "0" )); +DATA(insert ( 2108 int4_sum int8_sum_to_int8 - 0 20 20 _null_ _null_ )); +DATA(insert ( 2109 int2_sum int8_sum_to_int8 - 0 20 20 _null_ _null_ )); +DATA(insert ( 2110 float4pl float4pl - 0 700 700 _null_ "0" )); +DATA(insert ( 2111 float8pl float8pl - 0 701 701 _null_ "0" )); DATA(insert ( 2112 cash_pl cash_pl - 0 790 790 _null_ _null_ )); DATA(insert ( 2113 interval_pl interval_pl - 0 1186 1186 _null_ _null_ )); -DATA(insert ( 2114 numeric_add numeric_add - 0 1700 1700 _null_ _null_ )); +DATA(insert ( 2114 numeric_add numeric_add - 0 1700 1700 _null_ "0" )); #else DATA(insert ( 2107 int8_sum - 0 1700 _null_ )); DATA(insert ( 2108 int4_sum - 0 20 _null_ )); @@ -242,8 +242,8 @@ DATA(insert ( 3527 enum_smaller - 3518 3500 _null_ )); /* count */ /* Final function is data type conversion function numeric_int8 is refernced by OID because of ambiguous defininition in pg_proc */ #ifdef PGXC -DATA(insert ( 2147 int8inc_any int8_sum 1779 0 20 1700 "0" _null_ )); -DATA(insert ( 2803 int8inc int8_sum 1779 0 20 1700 "0" _null_ )); +DATA(insert ( 2147 int8inc_any int8_sum_to_int8 - 0 20 20 "0" _null_ )); +DATA(insert ( 2803 int8inc int8_sum_to_int8 - 0 20 20 "0" _null_ )); #else DATA(insert ( 2147 int8inc_any - 0 20 "0" )); DATA(insert ( 2803 int8inc - 0 20 "0" )); @@ -353,7 +353,7 @@ DATA(insert ( 2159 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" )); /* SQL2003 binary regression aggregates */ #ifdef PGXC -DATA(insert ( 2818 int8inc_float8_float8 int8_sum 1779 0 20 1700 "0" _null_ )); +DATA(insert ( 2818 int8inc_float8_float8 int8_sum_to_int8 - 0 20 20 "0" _null_ )); DATA(insert ( 2819 float8_regr_accum float8_regr_collect float8_regr_sxx 0 1022 1022 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" )); DATA(insert ( 2820 float8_regr_accum float8_regr_collect float8_regr_syy 0 1022 1022 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" )); DATA(insert ( 2821 float8_regr_accum float8_regr_collect float8_regr_sxy 0 1022 1022 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" )); diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index f24e0bc..7ae0b73 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -2792,7 +2792,8 @@ DESCR("SUM(int2) transition function"); DATA(insert OID = 1841 ( int4_sum PGNSP PGUID 12 1 0 0 f f f f f i 2 0 20 "20 23" _null_ _null_ _null_ _null_ int4_sum _null_ _null_ _null_ )); DESCR("SUM(int4) transition function"); DATA(insert OID = 1842 ( int8_sum PGNSP PGUID 12 1 0 0 f f f f f i 2 0 1700 "1700 20" _null_ _null_ _null_ _null_ int8_sum _null_ _null_ _null_ )); -DESCR("SUM(int8) transition function"); +DATA(insert OID = 3037 ( int8_sum_to_int8 PGNSP PGUID 12 1 0 0 f f f f f i 2 0 20 "20 20" _null_ _null_ _null_ _null_ int8_sum_to_int8 _null_ _null_ _null_ )); +DESCR("SUM(int*) collection function"); DATA(insert OID = 1843 
( interval_accum PGNSP PGUID 12 1 0 0 f f f t f i 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ )); DESCR("aggregate transition function"); DATA(insert OID = 1844 ( interval_avg PGNSP PGUID 12 1 0 0 f f f t f i 1 0 1186 "1187" _null_ _null_ _null_ _null_ interval_avg _null_ _null_ _null_ )); diff --git a/src/include/pgxc/execRemote.h b/src/include/pgxc/execRemote.h index 7ccef33..405325b 100644 --- a/src/include/pgxc/execRemote.h +++ b/src/include/pgxc/execRemote.h @@ -101,6 +101,12 @@ typedef struct RemoteQueryState * to initialize collecting of aggregates from the DNs */ bool initAggregates; + /* + * PGXCTODO - + * we should get rid of the simple_aggregates member, that should work + * through Agg node and grouping_planner should take care of optimizing it + * to the fullest + */ List *simple_aggregates; /* description of aggregate functions */ void *tuplesortstate; /* for merge sort */ /* Simple DISTINCT support */ diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index 3e3637d..93d0c31 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -940,6 +940,7 @@ extern Datum numeric_stddev_samp(PG_FUNCTION_ARGS); extern Datum int2_sum(PG_FUNCTION_ARGS); extern Datum int4_sum(PG_FUNCTION_ARGS); extern Datum int8_sum(PG_FUNCTION_ARGS); +extern Datum int8_sum_to_int8(PG_FUNCTION_ARGS); extern Datum int2_avg_accum(PG_FUNCTION_ARGS); extern Datum int4_avg_accum(PG_FUNCTION_ARGS); #ifdef PGXC diff --git a/src/test/regress/expected/opr_sanity_1.out b/src/test/regress/expected/opr_sanity_1.out index 885cb13..bf70944 100644 --- a/src/test/regress/expected/opr_sanity_1.out +++ b/src/test/regress/expected/opr_sanity_1.out @@ -709,14 +709,9 @@ WHERE a.aggfnoid = p.oid AND OR NOT binary_coercible(pfn.prorettype, p.prorettype) OR pfn.pronargs != 1 OR NOT binary_coercible(a.aggtranstype, pfn.proargtypes[0])); - aggfnoid | proname | oid | proname -----------+------------+------+--------- - 2108 | sum | 1779 | int8 - 2109 | sum | 1779 | int8 - 2147 | count | 1779 | int8 - 2803 | count | 1779 | int8 - 2818 | regr_count | 1779 | int8 -(5 rows) + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) -- If transfn is strict then either initval should be non-NULL, or -- input type should match transtype so that the first non-null input @@ -1120,7 +1115,10 @@ FROM pg_am am JOIN pg_opclass op ON opcmethod = am.oid WHERE am.amname <> 'gin' GROUP BY amname, amsupport, opcname, amprocfamily HAVING count(*) != amsupport OR amprocfamily IS NULL; -ERROR: GROUP BY clause is not yet supported + amname | opcname | count +--------+---------+------- +(0 rows) + SELECT amname, opcname, count(*) FROM pg_am am JOIN pg_opclass op ON opcmethod = am.oid LEFT JOIN pg_amproc p ON amprocfamily = opcfamily AND @@ -1128,7 +1126,10 @@ FROM pg_am am JOIN pg_opclass op ON opcmethod = am.oid WHERE am.amname = 'gin' GROUP BY amname, amsupport, opcname, amprocfamily HAVING count(*) < amsupport - 1 OR amprocfamily IS NULL; -ERROR: GROUP BY clause is not yet supported + amname | opcname | count +--------+---------+------- +(0 rows) + -- Unfortunately, we can't check the amproc link very well because the -- signature of the function may be different for different support routines -- or different base data types. 
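For reference, the new transition/collection pairing can be checked from SQL on a coordinator. The queries below are an illustrative sketch, not part of the patch; they assume the XC-specific pg_aggregate columns aggcollectfn and aggcollecttype shown in the header diff, and use the aggregate OIDs 2108 (sum(int4)) and 2803 (count(*)) from the DATA lines above:

-- inspect the transition/collection function pairing set up by this patch
SELECT aggfnoid::regproc        AS aggregate,
       aggtransfn               AS transition_fn,
       aggcollectfn             AS collection_fn,
       aggtranstype::regtype    AS transition_type,
       aggcollecttype::regtype  AS collection_type
FROM pg_aggregate
WHERE aggfnoid IN (2108, 2803);

-- int8_sum_to_int8 is registered as an ordinary two-argument, non-strict
-- function (pg_proc OID 3037), so its NULL handling can be exercised directly
SELECT int8_sum_to_int8(3::int8, 4::int8);    -- 7
SELECT int8_sum_to_int8(NULL::int8, 4::int8); -- 4, a NULL argument yields the other

Since the collection function now both takes and returns int8 for these aggregates, the separate numeric-to-int8 final function (OID 1779) is no longer needed, which is why the opr_sanity_1 expected output above drops from five rows to zero.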
diff --git a/src/test/regress/expected/with_1.out b/src/test/regress/expected/with_1.out index 5ae3440..7048e51 100644 --- a/src/test/regress/expected/with_1.out +++ b/src/test/regress/expected/with_1.out @@ -247,7 +247,11 @@ WITH q1(x,y) AS ( SELECT hundred, sum(ten) FROM tenk1 GROUP BY hundred ) SELECT count(*) FROM q1 WHERE y > (SELECT sum(y)/100 FROM q1 qsub); -ERROR: GROUP BY clause is not yet supported + count +------- + 50 +(1 row) + -- via a VIEW CREATE TEMPORARY VIEW vsubdepartment AS WITH RECURSIVE subdepartment AS diff --git a/src/test/regress/expected/xc_groupby.out b/src/test/regress/expected/xc_groupby.out new file mode 100644 index 0000000..58f9ea7 --- /dev/null +++ b/src/test/regress/expected/xc_groupby.out @@ -0,0 +1,475 @@ +-- create required tables and fill them with data +create table tab1 (val int, val2 int); +create table tab2 (val int, val2 int); +insert into tab1 values (1, 1), (2, 1), (3, 1), (2, 2), (6, 2), (4, 3), (1, 3), (6, 3); +insert into tab2 values (1, 1), (4, 1), (8, 1), (2, 4), (9, 4), (3, 4), (4, 2), (5, 2), (3, 2); +select count(*), sum(val), avg(val), sum(val)::float8/count(*), val2 from tab1 group by val2; + count | sum | avg | ?column? | val2 +-------+-----+--------------------+------------------+------ + 3 | 6 | 2.0000000000000000 | 2 | 1 + 2 | 8 | 4.0000000000000000 | 4 | 2 + 3 | 11 | 3.6666666666666667 | 3.66666666666667 | 3 +(3 rows) + +-- joins and group by +select count(*), sum(tab1.val * tab2.val), avg(tab1.val*tab2.val), sum(tab1.val*tab2.val)::float8/count(*), tab1.val2, tab2.val2 from tab1 full outer join tab2 on tab1.val2 = tab2.val2 group by tab1.val2, tab2.val2; + count | sum | avg | ?column? | val2 | val2 +-------+-----+---------------------+------------------+------+------ + 6 | 96 | 16.0000000000000000 | 16 | 2 | 2 + 9 | 78 | 8.6666666666666667 | 8.66666666666667 | 1 | 1 + 3 | | | | 3 | + 3 | | | | | 4 +(4 rows) + +-- aggregates over aggregates +select sum(y) from (select sum(val) y, val2%2 x from tab1 group by val2) q1 group by x; + sum +----- + 8 + 17 +(2 rows) + +-- group by without aggregate, just like distinct? +select val2 from tab1 group by val2; + val2 +------ + 1 + 2 + 3 +(3 rows) + +-- group by with aggregates in expression +select count(*) + sum(val) + avg(val), val2 from tab1 group by val2; + ?column? | val2 +---------------------+------ + 11.0000000000000000 | 1 + 14.0000000000000000 | 2 + 17.6666666666666667 | 3 +(3 rows) + +-- group by with expressions in group by clause +select sum(val), avg(val), 2 * val2 from tab1 group by 2 * val2; + sum | avg | ?column? +-----+--------------------+---------- + 11 | 3.6666666666666667 | 6 + 6 | 2.0000000000000000 | 2 + 8 | 4.0000000000000000 | 4 +(3 rows) + +drop table tab1; +drop table tab2; +-- repeat the same tests for replicated tables +-- create required tables and fill them with data +create table tab1 (val int, val2 int) distribute by replication; +create table tab2 (val int, val2 int) distribute by replication; +insert into tab1 values (1, 1), (2, 1), (3, 1), (2, 2), (6, 2), (4, 3), (1, 3), (6, 3); +insert into tab2 values (1, 1), (4, 1), (8, 1), (2, 4), (9, 4), (3, 4), (4, 2), (5, 2), (3, 2); +select count(*), sum(val), avg(val), sum(val)::float8/count(*), val2 from tab1 group by val2; + count | sum | avg | ?column? 
| val2 +-------+-----+--------------------+------------------+------ + 3 | 6 | 2.0000000000000000 | 2 | 1 + 2 | 8 | 4.0000000000000000 | 4 | 2 + 3 | 11 | 3.6666666666666667 | 3.66666666666667 | 3 +(3 rows) + +-- joins and group by +select count(*), sum(tab1.val * tab2.val), avg(tab1.val*tab2.val), sum(tab1.val*tab2.val)::float8/count(*), tab1.val2, tab2.val2 from tab1 full outer join tab2 on tab1.val2 = tab2.val2 group by tab1.val2, tab2.val2; + count | sum | avg | ?column? | val2 | val2 +-------+-----+---------------------+------------------+------+------ + 6 | 96 | 16.0000000000000000 | 16 | 2 | 2 + 9 | 78 | 8.6666666666666667 | 8.66666666666667 | 1 | 1 + 3 | | | | 3 | + 3 | | | | | 4 +(4 rows) + +-- aggregates over aggregates +select sum(y) from (select sum(val) y, val2%2 x from tab1 group by val2) q1 group by x; + sum +----- + 8 + 17 +(2 rows) + +-- group by without aggregate, just like distinct? +select val2 from tab1 group by val2; + val2 +------ + 1 + 2 + 3 +(3 rows) + +-- group by with aggregates in expression +select count(*) + sum(val) + avg(val), val2 from tab1 group by val2; + ?column? | val2 +---------------------+------ + 11.0000000000000000 | 1 + 14.0000000000000000 | 2 + 17.6666666666666667 | 3 +(3 rows) + +-- group by with expressions in group by clause +select sum(val), avg(val), 2 * val2 from tab1 group by 2 * val2; + sum | avg | ?column? +-----+--------------------+---------- + 11 | 3.6666666666666667 | 6 + 6 | 2.0000000000000000 | 2 + 8 | 4.0000000000000000 | 4 +(3 rows) + +drop table tab1; +drop table tab2; +-- some tests involving nulls, characters, float type etc. +create table def(a int, b varchar(25)); +insert into def VALUES (NULL, NULL); +insert into def VALUES (1, NULL); +insert into def VALUES (NULL, 'One'); +insert into def VALUES (2, 'Two'); +insert into def VALUES (2, 'Two'); +insert into def VALUES (3, 'Three'); +insert into def VALUES (4, 'Three'); +insert into def VALUES (5, 'Three'); +insert into def VALUES (6, 'Two'); +insert into def VALUES (7, NULL); +insert into def VALUES (8, 'Two'); +insert into def VALUES (9, 'Three'); +insert into def VALUES (10, 'Three'); +select a,count(a) from def group by a order by a; + a | count +----+------- + 1 | 1 + 2 | 2 + 3 | 1 + 4 | 1 + 5 | 1 + 6 | 1 + 7 | 1 + 8 | 1 + 9 | 1 + 10 | 1 + | 0 +(11 rows) + +select avg(a) from def group by a; + avg +------------------------ + + 6.0000000000000000 + 5.0000000000000000 + 8.0000000000000000 + 1.00000000000000000000 + 9.0000000000000000 + 2.0000000000000000 + 7.0000000000000000 + 10.0000000000000000 + 3.0000000000000000 + 4.0000000000000000 +(11 rows) + +select avg(a) from def group by a; + avg +------------------------ + + 6.0000000000000000 + 5.0000000000000000 + 8.0000000000000000 + 1.00000000000000000000 + 9.0000000000000000 + 2.0000000000000000 + 7.0000000000000000 + 10.0000000000000000 + 3.0000000000000000 + 4.0000000000000000 +(11 rows) + +select avg(a) from def group by b; + avg +-------------------- + 4.0000000000000000 + + 4.5000000000000000 + 6.2000000000000000 +(4 rows) + +select sum(a) from def group by b; + sum +----- + 8 + + 18 + 31 +(4 rows) + +select count(*) from def group by b; + count +------- + 3 + 1 + 4 + 5 +(4 rows) + +select count(*) from def where a is not null group by a; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 2 + 1 + 1 + 1 +(10 rows) + +select b from def group by b; + b +------- + + One + Two + Three +(4 rows) + +select b,count(b) from def group by b; + b | count +-------+------- + | 0 + One | 1 + Two | 4 + Three | 5 +(4 rows) + +select count(*) 
from def where b is null group by b; + count +------- + 3 +(1 row) + +create table g(a int, b float, c numeric); +insert into g values(1,2.1,3.2); +insert into g values(1,2.1,3.2); +insert into g values(2,2.3,5.2); +select sum(a) from g group by a; + sum +----- + 2 + 2 +(2 rows) + +select sum(b) from g group by b; + sum +----- + 2.3 + 4.2 +(2 rows) + +select sum(c) from g group by b; + sum +----- + 5.2 + 6.4 +(2 rows) + +select avg(a) from g group by b; + avg +------------------------ + 2.0000000000000000 + 1.00000000000000000000 +(2 rows) + +select avg(b) from g group by c; + avg +----- + 2.3 + 2.1 +(2 rows) + +select avg(c) from g group by c; + avg +-------------------- + 5.2000000000000000 + 3.2000000000000000 +(2 rows) + +drop table def; +drop table g; +-- same test with replicated tables +create table def(a int, b varchar(25)) distribute by replication; +insert into def VALUES (NULL, NULL); +insert into def VALUES (1, NULL); +insert into def VALUES (NULL, 'One'); +insert into def VALUES (2, 'Two'); +insert into def VALUES (2, 'Two'); +insert into def VALUES (3, 'Three'); +insert into def VALUES (4, 'Three'); +insert into def VALUES (5, 'Three'); +insert into def VALUES (6, 'Two'); +insert into def VALUES (7, NULL); +insert into def VALUES (8, 'Two'); +insert into def VALUES (9, 'Three'); +insert into def VALUES (10, 'Three'); +select a,count(a) from def group by a order by a; + a | count +----+------- + 1 | 1 + 2 | 2 + 3 | 1 + 4 | 1 + 5 | 1 + 6 | 1 + 7 | 1 + 8 | 1 + 9 | 1 + 10 | 1 + | 0 +(11 rows) + +select avg(a) from def group by a; + avg +------------------------ + + 6.0000000000000000 + 5.0000000000000000 + 8.0000000000000000 + 1.00000000000000000000 + 2.0000000000000000 + 9.0000000000000000 + 3.0000000000000000 + 7.0000000000000000 + 10.0000000000000000 + 4.0000000000000000 +(11 rows) + +select avg(a) from def group by a; + avg +------------------------ + + 6.0000000000000000 + 5.0000000000000000 + 8.0000000000000000 + 1.00000000000000000000 + 2.0000000000000000 + 9.0000000000000000 + 3.0000000000000000 + 7.0000000000000000 + 10.0000000000000000 + 4.0000000000000000 +(11 rows) + +select avg(a) from def group by b; + avg +-------------------- + 4.0000000000000000 + + 4.5000000000000000 + 6.2000000000000000 +(4 rows) + +select sum(a) from def group by b; + sum +----- + 8 + + 18 + 31 +(4 rows) + +select count(*) from def group by b; + count +------- + 3 + 1 + 4 + 5 +(4 rows) + +select count(*) from def where a is not null group by a; + count +------- + 1 + 1 + 1 + 1 + 1 + 2 + 1 + 1 + 1 + 1 +(10 rows) + +select b from def group by b; + b +------- + + One + Two + Three +(4 rows) + +select b,count(b) from def group by b; + b | count +-------+------- + | 0 + One | 1 + Two | 4 + Three | 5 +(4 rows) + +select count(*) from def where b is null group by b; + count +------- + 3 +(1 row) + +create table g(a int, b float, c numeric) distribute by replication; +insert into g values(1,2.1,3.2); +insert into g values(1,2.1,3.2); +insert into g values(2,2.3,5.2); +select sum(a) from g group by a; + sum +----- + 2 + 2 +(2 rows) + +select sum(b) from g group by ... [truncated message content] |
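To make the transition/collection split concrete, here is a minimal editorial sketch, not part of the commit, of how select sum(val) ... group by val2 on a distributed tab1 is effectively evaluated: each datanode reduces its own slice of the table with the transition function, and the coordinator then combines the per-node partials with the collection function before any final function runs. The node assignment below is hypothetical, and only the val2 = 1 and val2 = 2 groups are shown:

-- stand-in for the per-datanode partial sums of tab1
-- (val2 = 1 holds vals 1, 2, 3; val2 = 2 holds vals 2, 6)
WITH partials(node, val2, partial_sum) AS (
    VALUES (1, 1, 3::int8),  -- datanode 1: sum of vals 1 and 2
           (2, 1, 3::int8),  -- datanode 2: sum of val 3
           (1, 2, 2::int8),  -- datanode 1: sum of val 2
           (2, 2, 6::int8)   -- datanode 2: sum of val 6
)
SELECT val2, sum(partial_sum) AS total  -- collection step at the coordinator
FROM partials
GROUP BY val2
ORDER BY val2;
-- val2 = 1 -> 6 and val2 = 2 -> 8, matching the xc_groupby output above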