From: Michael P. <mic...@us...> - 2011-03-08 01:44:36
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated
       via  11b8a7940017006e07f7448c9a42e20b11655ba0 (commit)
      from  49107f629e3628a89ec4848e167ef9c8e6d2a4e7 (commit)

- Log -----------------------------------------------------------------
commit 11b8a7940017006e07f7448c9a42e20b11655ba0
Author: Michael P <mic...@us...>
Date:   Tue Mar 8 10:43:44 2011 +0900

    Block trigger as this feature is not supported

diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 2bb2200..a48c4a3 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -1308,6 +1308,11 @@ standard_ProcessUtility(Node *parsetree,
             (void) CreateTrigger((CreateTrigStmt *) parsetree, queryString,
                                  InvalidOid, InvalidOid, false);
 #ifdef PGXC
+            /* Postgres-XC does not support yet triggers */
+            ereport(ERROR,
+                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                     errmsg("TRIGGER is not supported")));
+
             if (IS_PGXC_COORDINATOR)
                 ExecUtilityStmtOnNodes(queryString, NULL, false, EXEC_ON_ALL_NODES);
 #endif

-----------------------------------------------------------------------

Summary of changes:
 src/backend/tcop/utility.c |    5 +++++
 1 files changed, 5 insertions(+), 0 deletions(-)

hooks/post-receive
--
Postgres-XC
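At the SQL level, the effect of this patch is that any CREATE TRIGGER statement sent to a Coordinator now fails up front. As a rough illustration (the table and trigger names below are hypothetical; the error text is the errmsg() added in the diff above):

    CREATE TABLE accounts (id int, balance numeric);
    CREATE TRIGGER accounts_noop
        BEFORE UPDATE ON accounts
        FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
    ERROR:  TRIGGER is not supported

Keeping the ereport() inside the #ifdef PGXC block leaves the vanilla PostgreSQL code path untouched while the feature remains unimplemented in XC.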
From: Michael P. <mic...@us...> - 2011-03-08 01:28:54
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated via 49107f629e3628a89ec4848e167ef9c8e6d2a4e7 (commit) from 901cb86e90d8d286dd0ca3b4c337ed512b28021c (commit) - Log ----------------------------------------------------------------- commit 49107f629e3628a89ec4848e167ef9c8e6d2a4e7 Author: Michael P <mic...@us...> Date: Tue Mar 8 10:27:37 2011 +0900 Fix for regression test txid PG-XC does not support yet TEMP tables. diff --git a/src/test/regress/expected/txid_1.out b/src/test/regress/expected/txid_1.out new file mode 100644 index 0000000..9bb1966 --- /dev/null +++ b/src/test/regress/expected/txid_1.out @@ -0,0 +1,124 @@ +-- txid_snapshot data type and related functions +-- i/o +select '12:13:'::txid_snapshot; + txid_snapshot +--------------- + 12:13: +(1 row) + +select '12:18:14,16'::txid_snapshot; + txid_snapshot +--------------- + 12:18:14,16 +(1 row) + +-- errors +select '31:12:'::txid_snapshot; +ERROR: invalid input for txid_snapshot: "31:12:" +LINE 1: select '31:12:'::txid_snapshot; + ^ +select '0:1:'::txid_snapshot; +ERROR: invalid input for txid_snapshot: "0:1:" +LINE 1: select '0:1:'::txid_snapshot; + ^ +select '12:13:0'::txid_snapshot; +ERROR: invalid input for txid_snapshot: "12:13:0" +LINE 1: select '12:13:0'::txid_snapshot; + ^ +select '12:16:14,13'::txid_snapshot; +ERROR: invalid input for txid_snapshot: "12:16:14,13" +LINE 1: select '12:16:14,13'::txid_snapshot; + ^ +select '12:16:14,14'::txid_snapshot; +ERROR: invalid input for txid_snapshot: "12:16:14,14" +LINE 1: select '12:16:14,14'::txid_snapshot; + ^ +create temp table snapshot_test ( + nr integer, + snap txid_snapshot +); +ERROR: PG-XC does not yet support temporary tables +insert into snapshot_test values (1, '12:13:'); +ERROR: relation "snapshot_test" does not exist +LINE 1: insert into snapshot_test values (1, '12:13:'); + ^ +insert into snapshot_test values (2, '12:20:13,15,18'); +ERROR: relation "snapshot_test" does not exist +LINE 1: insert into snapshot_test values (2, '12:20:13,15,18'); + ^ +insert into snapshot_test values (3, '100001:100009:100005,100007,100008'); +ERROR: relation "snapshot_test" does not exist +LINE 1: insert into snapshot_test values (3, '100001:100009:100005,1... + ^ +insert into snapshot_test values (4, '100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131'); +ERROR: relation "snapshot_test" does not exist +LINE 1: insert into snapshot_test values (4, '100:150:101,102,103,10... + ^ +select snap from snapshot_test order by nr; +ERROR: relation "snapshot_test" does not exist +LINE 1: select snap from snapshot_test order by nr; + ^ +select txid_snapshot_xmin(snap), + txid_snapshot_xmax(snap), + txid_snapshot_xip(snap) +from snapshot_test order by nr; +ERROR: relation "snapshot_test" does not exist +LINE 4: from snapshot_test order by nr; + ^ +select id, txid_visible_in_snapshot(id, snap) +from snapshot_test, generate_series(11, 21) id +where nr = 2; +ERROR: relation "snapshot_test" does not exist +LINE 2: from snapshot_test, generate_series(11, 21) id + ^ +-- test bsearch +select id, txid_visible_in_snapshot(id, snap) +from snapshot_test, generate_series(90, 160) id +where nr = 4; +ERROR: relation "snapshot_test" does not exist +LINE 2: from snapshot_test, generate_series(90, 160) id + ^ +-- test current values also +select txid_current() >= txid_snapshot_xmin(txid_current_snapshot()); + ?column? 
+---------- + t +(1 row) + +-- we can't assume current is always less than xmax, however +select txid_visible_in_snapshot(txid_current(), txid_current_snapshot()); + txid_visible_in_snapshot +-------------------------- + f +(1 row) + +-- test 64bitness +select txid_snapshot '1000100010001000:1000100010001100:1000100010001012,1000100010001013'; + txid_snapshot +--------------------------------------------------------------------- + 1000100010001000:1000100010001100:1000100010001012,1000100010001013 +(1 row) + +select txid_visible_in_snapshot('1000100010001012', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + txid_visible_in_snapshot +-------------------------- + f +(1 row) + +select txid_visible_in_snapshot('1000100010001015', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + txid_visible_in_snapshot +-------------------------- + t +(1 row) + +-- test 64bit overflow +SELECT txid_snapshot '1:9223372036854775807:3'; + txid_snapshot +------------------------- + 1:9223372036854775807:3 +(1 row) + +SELECT txid_snapshot '1:9223372036854775808:3'; +ERROR: invalid input for txid_snapshot: "1:9223372036854775808:3" +LINE 1: SELECT txid_snapshot '1:9223372036854775808:3'; + ^ ----------------------------------------------------------------------- Summary of changes: src/test/regress/expected/txid_1.out | 124 ++++++++++++++++++++++++++++++++++ 1 files changed, 124 insertions(+), 0 deletions(-) create mode 100644 src/test/regress/expected/txid_1.out hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-08 01:15:40
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated via 901cb86e90d8d286dd0ca3b4c337ed512b28021c (commit) from 4200394d48340e4a304f4f5f6ac3c4668445f6e4 (commit) - Log ----------------------------------------------------------------- commit 901cb86e90d8d286dd0ca3b4c337ed512b28021c Author: Michael P <mic...@us...> Date: Tue Mar 8 10:14:22 2011 +0900 Fix for regression test create_table INHERITS based on multiple parents is not supported in XC. diff --git a/src/test/regress/expected/create_table_1.out b/src/test/regress/expected/create_table_1.out new file mode 100644 index 0000000..76a1b71 --- /dev/null +++ b/src/test/regress/expected/create_table_1.out @@ -0,0 +1,202 @@ +-- +-- CREATE_TABLE +-- +-- +-- CLASS DEFINITIONS +-- +CREATE TABLE hobbies_r ( + name text, + person text +); +CREATE TABLE equipment_r ( + name text, + hobby text +); +CREATE TABLE onek ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +); +CREATE TABLE tenk1 ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) WITH OIDS; +CREATE TABLE tenk2 ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +); +CREATE TABLE person ( + name text, + age int4, + location point +); +CREATE TABLE emp ( + salary int4, + manager name +) INHERITS (person) WITH OIDS; +CREATE TABLE student ( + gpa float8 +) INHERITS (person); +CREATE TABLE stud_emp ( + percent int4 +) INHERITS (emp, student); +ERROR: Cannot currently distribute a table with more than one parent. +CREATE TABLE city ( + name name, + location box, + budget city_budget +); +ERROR: type "city_budget" does not exist +LINE 4: budget city_budget + ^ +CREATE TABLE dept ( + dname name, + mgrname text +); +CREATE TABLE slow_emp4000 ( + home_base box +); +CREATE TABLE fast_emp4000 ( + home_base box +); +CREATE TABLE road ( + name text, + thepath path +); +CREATE TABLE ihighway () INHERITS (road); +CREATE TABLE shighway ( + surface text +) INHERITS (road); +CREATE TABLE real_city ( + pop int4, + cname text, + outline path +); +-- +-- test the "star" operators a bit more thoroughly -- this time, +-- throw in lots of NULL fields... +-- +-- a is the type root +-- b and c inherit from a (one-level single inheritance) +-- d inherits from b and c (two-level multiple inheritance) +-- e inherits from c (two-level single inheritance) +-- f inherits from e (three-level single inheritance) +-- +CREATE TABLE a_star ( + class char, + a int4 +); +CREATE TABLE b_star ( + b text +) INHERITS (a_star); +CREATE TABLE c_star ( + c name +) INHERITS (a_star); +CREATE TABLE d_star ( + d float8 +) INHERITS (b_star, c_star); +ERROR: Cannot currently distribute a table with more than one parent. 
+CREATE TABLE e_star ( + e int2 +) INHERITS (c_star); +CREATE TABLE f_star ( + f polygon +) INHERITS (e_star); +CREATE TABLE aggtest ( + a int2, + b float4 +); +CREATE TABLE hash_i4_heap ( + seqno int4, + random int4 +); +CREATE TABLE hash_name_heap ( + seqno int4, + random name +); +CREATE TABLE hash_txt_heap ( + seqno int4, + random text +); +CREATE TABLE hash_f8_heap ( + seqno int4, + random float8 +); +-- don't include the hash_ovfl_heap stuff in the distribution +-- the data set is too large for what it's worth +-- +-- CREATE TABLE hash_ovfl_heap ( +-- x int4, +-- y int4 +-- ); +CREATE TABLE bt_i4_heap ( + seqno int4, + random int4 +); +CREATE TABLE bt_name_heap ( + seqno name, + random int4 +); +CREATE TABLE bt_txt_heap ( + seqno text, + random int4 +); +CREATE TABLE bt_f8_heap ( + seqno float8, + random int4 +); +CREATE TABLE array_op_test ( + seqno int4, + i int4[], + t text[] +); +CREATE TABLE array_index_op_test ( + seqno int4, + i int4[], + t text[] +); +CREATE TABLE test_tsvector( + t text, + a tsvector +); ----------------------------------------------------------------------- Summary of changes: .../create_table_1.out} | 47 ++----------------- 1 files changed, 5 insertions(+), 42 deletions(-) copy src/test/regress/{sql/create_table.sql => expected/create_table_1.out} (93%) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-08 01:06:24
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated via 4200394d48340e4a304f4f5f6ac3c4668445f6e4 (commit) via fa1747011efd3841353894a0500e31411b4f4efa (commit) via 637f46cda04fee2d9d4bd78c1c9f4f07373aece5 (commit) via 405874a1f0d9b6634349bcee5165cd7e305ba81a (commit) from 94b048a92d21bd787294cabd3a144cf69badd419 (commit) - Log ----------------------------------------------------------------- commit 4200394d48340e4a304f4f5f6ac3c4668445f6e4 Author: Michael P <mic...@us...> Date: Tue Mar 8 10:04:45 2011 +0900 Partial fix for regression test float fix There is still a diff linked to an error message. diff --git a/src/test/regress/expected/float8_1.out b/src/test/regress/expected/float8_1.out new file mode 100644 index 0000000..1f79f66 --- /dev/null +++ b/src/test/regress/expected/float8_1.out @@ -0,0 +1,447 @@ +-- +-- FLOAT8 +-- +CREATE TABLE FLOAT8_TBL(f1 float8); +INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 '); +INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 '); +INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200'); +-- test for underflow and overflow handling +SELECT '10e400'::float8; +ERROR: "10e400" is out of range for type double precision +LINE 1: SELECT '10e400'::float8; + ^ +SELECT '-10e400'::float8; +ERROR: "-10e400" is out of range for type double precision +LINE 1: SELECT '-10e400'::float8; + ^ +SELECT '10e-400'::float8; +ERROR: "10e-400" is out of range for type double precision +LINE 1: SELECT '10e-400'::float8; + ^ +SELECT '-10e-400'::float8; +ERROR: "-10e-400" is out of range for type double precision +LINE 1: SELECT '-10e-400'::float8; + ^ +-- bad input +INSERT INTO FLOAT8_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type double precision: "" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (''); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type double precision: " " +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' '); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); +ERROR: invalid input syntax for type double precision: "xyz" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0'); +ERROR: invalid input syntax for type double precision: "5.0.0" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); +ERROR: invalid input syntax for type double precision: "5 . 0" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0'); +ERROR: invalid input syntax for type double precision: "5. 0" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 
0'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); +ERROR: invalid input syntax for type double precision: " - 3" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); +ERROR: invalid input syntax for type double precision: "123 5" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); + ^ +-- special inputs +SELECT 'NaN'::float8; + float8 +-------- + NaN +(1 row) + +SELECT 'nan'::float8; + float8 +-------- + NaN +(1 row) + +SELECT ' NAN '::float8; + float8 +-------- + NaN +(1 row) + +SELECT 'infinity'::float8; + float8 +---------- + Infinity +(1 row) + +SELECT ' -INFINiTY '::float8; + float8 +----------- + -Infinity +(1 row) + +-- bad special inputs +SELECT 'N A N'::float8; +ERROR: invalid input syntax for type double precision: "N A N" +LINE 1: SELECT 'N A N'::float8; + ^ +SELECT 'NaN x'::float8; +ERROR: invalid input syntax for type double precision: "NaN x" +LINE 1: SELECT 'NaN x'::float8; + ^ +SELECT ' INFINITY x'::float8; +ERROR: invalid input syntax for type double precision: " INFINITY x" +LINE 1: SELECT ' INFINITY x'::float8; + ^ +SELECT 'Infinity'::float8 + 100.0; + ?column? +---------- + Infinity +(1 row) + +SELECT 'Infinity'::float8 / 'Infinity'::float8; + ?column? +---------- + NaN +(1 row) + +SELECT 'nan'::float8 / 'nan'::float8; + ?column? +---------- + NaN +(1 row) + +SELECT 'nan'::numeric::float8; + float8 +-------- + NaN +(1 row) + +SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; + five | f1 +------+---------------------- + | -34.84 + | 0 + | 1.2345678901234e-200 + | 1004.3 + | 1.2345678901234e+200 +(5 rows) + +SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3' ORDER BY f1; + four | f1 +------+---------------------- + | -34.84 + | 0 + | 1.2345678901234e-200 + | 1.2345678901234e+200 +(4 rows) + +SELECT '' AS one, f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3'; + one | f1 +-----+-------- + | 1004.3 +(1 row) + +SELECT '' AS three, f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1 ORDER BY f1; + three | f1 +-------+---------------------- + | -34.84 + | 0 + | 1.2345678901234e-200 +(3 rows) + +SELECT '' AS three, f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3' ORDER BY f1; + three | f1 +-------+---------------------- + | -34.84 + | 0 + | 1.2345678901234e-200 +(3 rows) + +SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1 ORDER BY f1; + four | f1 +------+---------------------- + | -34.84 + | 0 + | 1.2345678901234e-200 + | 1004.3 +(4 rows) + +SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3' ORDER BY f1; + four | f1 +------+---------------------- + | -34.84 + | 0 + | 1.2345678901234e-200 + | 1004.3 +(4 rows) + +SELECT '' AS three, f.f1, f.f1 * '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+----------------------+----------------------- + | 1.2345678901234e-200 | -1.2345678901234e-199 + | 1004.3 | -10043 + | 1.2345678901234e+200 | -1.2345678901234e+201 +(3 rows) + +SELECT '' AS three, f.f1, f.f1 + '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+----------------------+---------------------- + | 1.2345678901234e-200 | -10 + | 1004.3 | 994.3 + | 1.2345678901234e+200 | 1.2345678901234e+200 +(3 rows) + +SELECT '' AS three, f.f1, f.f1 / '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+----------------------+----------------------- + | 1.2345678901234e-200 | -1.2345678901234e-201 + | 1004.3 | -100.43 + | 1.2345678901234e+200 | -1.2345678901234e+199 +(3 rows) + +SELECT '' AS three, 
f.f1, f.f1 - '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+----------------------+---------------------- + | 1.2345678901234e-200 | 10 + | 1004.3 | 1014.3 + | 1.2345678901234e+200 | 1.2345678901234e+200 +(3 rows) + +SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 + FROM FLOAT8_TBL f where f.f1 = '1004.3'; + one | square_f1 +-----+------------ + | 1008618.49 +(1 row) + +-- absolute value +SELECT '' AS five, f.f1, @f.f1 AS abs_f1 + FROM FLOAT8_TBL f ORDER BY f1; + five | f1 | abs_f1 +------+----------------------+---------------------- + | -34.84 | 34.84 + | 0 | 0 + | 1.2345678901234e-200 | 1.2345678901234e-200 + | 1004.3 | 1004.3 + | 1.2345678901234e+200 | 1.2345678901234e+200 +(5 rows) + +-- truncate +SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 + FROM FLOAT8_TBL f ORDER BY f1; + five | f1 | trunc_f1 +------+----------------------+---------------------- + | -34.84 | -34 + | 0 | 0 + | 1.2345678901234e-200 | 0 + | 1004.3 | 1004 + | 1.2345678901234e+200 | 1.2345678901234e+200 +(5 rows) + +-- round +SELECT '' AS five, f.f1, round(f.f1) AS round_f1 + FROM FLOAT8_TBL f ORDER BY f1; + five | f1 | round_f1 +------+----------------------+---------------------- + | -34.84 | -35 + | 0 | 0 + | 1.2345678901234e-200 | 0 + | 1004.3 | 1004 + | 1.2345678901234e+200 | 1.2345678901234e+200 +(5 rows) + +-- ceil / ceiling +select ceil(f1) as ceil_f1 from float8_tbl f ORDER BY f1; + ceil_f1 +---------------------- + -34 + 0 + 1 + 1005 + 1.2345678901234e+200 +(5 rows) + +select ceiling(f1) as ceiling_f1 from float8_tbl f ORDER BY f1; + ceiling_f1 +---------------------- + -34 + 0 + 1 + 1005 + 1.2345678901234e+200 +(5 rows) + +-- floor +select floor(f1) as floor_f1 from float8_tbl f ORDER BY f1; + floor_f1 +---------------------- + -35 + 0 + 0 + 1004 + 1.2345678901234e+200 +(5 rows) + +-- sign +select sign(f1) as sign_f1 from float8_tbl f ORDER BY f1; + sign_f1 +--------- + -1 + 0 + 1 + 1 + 1 +(5 rows) + +-- square root +SELECT sqrt(float8 '64') AS eight; + eight +------- + 8 +(1 row) + +SELECT |/ float8 '64' AS eight; + eight +------- + 8 +(1 row) + +SELECT '' AS three, f.f1, |/f.f1 AS sqrt_f1 + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | sqrt_f1 +-------+----------------------+----------------------- + | 1.2345678901234e-200 | 1.11111110611109e-100 + | 1004.3 | 31.6906926399535 + | 1.2345678901234e+200 | 1.11111110611109e+100 +(3 rows) + +-- power +SELECT power(float8 '144', float8 '0.5'); + power +------- + 12 +(1 row) + +-- take exp of ln(f.f1) +SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | exp_ln_f1 +-------+----------------------+----------------------- + | 1.2345678901234e-200 | 1.23456789012339e-200 + | 1004.3 | 1004.3 + | 1.2345678901234e+200 | 1.23456789012338e+200 +(3 rows) + +-- cube root +SELECT ||/ float8 '27' AS three; + three +------- + 3 +(1 row) + +SELECT '' AS five, f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f ORDER BY f1; + five | f1 | cbrt_f1 +------+----------------------+---------------------- + | -34.84 | -3.26607421344208 + | 0 | 0 + | 1.2345678901234e-200 | 2.3112042409018e-67 + | 1004.3 | 10.014312837827 + | 1.2345678901234e+200 | 4.97933859234765e+66 +(5 rows) + +SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; + five | f1 +------+---------------------- + | -34.84 + | 0 + | 1.2345678901234e-200 + | 1004.3 + | 1.2345678901234e+200 +(5 rows) + +UPDATE FLOAT8_TBL + SET f1 = FLOAT8_TBL.f1 * '-1' + WHERE FLOAT8_TBL.f1 > '0.0'; + +SELECT '' AS bad, 
f.f1 * '1e200' from FLOAT8_TBL f ORDER BY f1; +ERROR: value out of range: overflow +SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f ORDER BY f1; +ERROR: value out of range: overflow +SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5; + ?column? +---------- + 2 +(1 row) + +SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ; +ERROR: cannot take logarithm of zero +SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0'; +ERROR: cannot take logarithm of a negative number +SELECT '' AS bad, exp(f.f1) from FLOAT8_TBL f ORDER BY f1; +ERROR: value out of range: underflow +SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f; +ERROR: division by zero +SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; + five | f1 +------+----------------------- + | -1.2345678901234e+200 + | -1004.3 + | -34.84 + | -1.2345678901234e-200 + | 0 +(5 rows) + +-- test for over- and underflow +INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); +ERROR: "10e400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); +ERROR: "-10e400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400'); +ERROR: "10e-400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400'); +ERROR: "-10e-400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400'); + ^ +-- maintain external table consistency across platforms +-- delete all values and reinsert well-behaved ones +DELETE FROM FLOAT8_TBL; +INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200'); +SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1; + five | f1 +------+----------------------- + | -1.2345678901234e+200 + | -1004.3 + | -34.84 + | -1.2345678901234e-200 + | 0 +(5 rows) + commit fa1747011efd3841353894a0500e31411b4f4efa Author: Michael P <mic...@us...> Date: Tue Mar 8 09:56:59 2011 +0900 Fix for regression tests float4 diff --git a/src/test/regress/sql/float4.sql b/src/test/regress/sql/float4.sql index 12ad660..2b42187 100644 --- a/src/test/regress/sql/float4.sql +++ b/src/test/regress/sql/float4.sql @@ -42,8 +42,6 @@ SELECT 'Infinity'::float4 / 'Infinity'::float4; SELECT 'nan'::float4 / 'nan'::float4; SELECT 'nan'::numeric::float4; -SELECT '' AS five, * FROM FLOAT4_TBL; - SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; SELECT '' AS four, f.* FROM FLOAT4_TBL f WHERE f.f1 <> '1004.3' ORDER BY f1; commit 637f46cda04fee2d9d4bd78c1c9f4f07373aece5 Author: Michael P <mic...@us...> Date: Tue Mar 8 09:50:05 2011 +0900 Fix regression tests for int4 diff --git a/src/test/regress/expected/int4_1.out b/src/test/regress/expected/int4_1.out new file mode 100644 index 0000000..d733e90 --- /dev/null +++ b/src/test/regress/expected/int4_1.out @@ -0,0 +1,333 @@ +-- +-- INT4 +-- WARNING: int4 operators never check for over/underflow! +-- Some of these answers are consequently numerically incorrect. 
+-- +CREATE TABLE INT4_TBL(f1 int4); +INSERT INTO INT4_TBL(f1) VALUES (' 0 '); +INSERT INTO INT4_TBL(f1) VALUES ('123456 '); +INSERT INTO INT4_TBL(f1) VALUES (' -123456'); +INSERT INTO INT4_TBL(f1) VALUES ('34.5'); +ERROR: invalid input syntax for integer: "34.5" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('34.5'); + ^ +-- largest and smallest values +INSERT INTO INT4_TBL(f1) VALUES ('2147483647'); +INSERT INTO INT4_TBL(f1) VALUES ('-2147483647'); +-- bad input values -- should give errors +INSERT INTO INT4_TBL(f1) VALUES ('1000000000000'); +ERROR: value "1000000000000" is out of range for type integer +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('1000000000000'); + ^ +INSERT INTO INT4_TBL(f1) VALUES ('asdf'); +ERROR: invalid input syntax for integer: "asdf" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('asdf'); + ^ +INSERT INTO INT4_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for integer: " " +LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' '); + ^ +INSERT INTO INT4_TBL(f1) VALUES (' asdf '); +ERROR: invalid input syntax for integer: " asdf " +LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' asdf '); + ^ +INSERT INTO INT4_TBL(f1) VALUES ('- 1234'); +ERROR: invalid input syntax for integer: "- 1234" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('- 1234'); + ^ +INSERT INTO INT4_TBL(f1) VALUES ('123 5'); +ERROR: invalid input syntax for integer: "123 5" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('123 5'); + ^ +INSERT INTO INT4_TBL(f1) VALUES (''); +ERROR: invalid input syntax for integer: "" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES (''); + ^ +SELECT '' AS five, * FROM INT4_TBL ORDER BY f1; + five | f1 +------+------------- + | -2147483647 + | -123456 + | 0 + | 123456 + | 2147483647 +(5 rows) + +SELECT '' AS four, i.* FROM INT4_TBL i WHERE i.f1 <> int2 '0' ORDER BY f1; + four | f1 +------+------------- + | -2147483647 + | -123456 + | 123456 + | 2147483647 +(4 rows) + +SELECT '' AS four, i.* FROM INT4_TBL i WHERE i.f1 <> int4 '0' ORDER BY f1; + four | f1 +------+------------- + | -2147483647 + | -123456 + | 123456 + | 2147483647 +(4 rows) + +SELECT '' AS one, i.* FROM INT4_TBL i WHERE i.f1 = int2 '0'; + one | f1 +-----+---- + | 0 +(1 row) + +SELECT '' AS one, i.* FROM INT4_TBL i WHERE i.f1 = int4 '0'; + one | f1 +-----+---- + | 0 +(1 row) + +SELECT '' AS two, i.* FROM INT4_TBL i WHERE i.f1 < int2 '0' ORDER BY f1; + two | f1 +-----+------------- + | -2147483647 + | -123456 +(2 rows) + +SELECT '' AS two, i.* FROM INT4_TBL i WHERE i.f1 < int4 '0' ORDER BY f1; + two | f1 +-----+------------- + | -2147483647 + | -123456 +(2 rows) + +SELECT '' AS three, i.* FROM INT4_TBL i WHERE i.f1 <= int2 '0' ORDER BY f1; + three | f1 +-------+------------- + | -2147483647 + | -123456 + | 0 +(3 rows) + +SELECT '' AS three, i.* FROM INT4_TBL i WHERE i.f1 <= int4 '0' ORDER BY f1; + three | f1 +-------+------------- + | -2147483647 + | -123456 + | 0 +(3 rows) + +SELECT '' AS two, i.* FROM INT4_TBL i WHERE i.f1 > int2 '0' ORDER BY f1; + two | f1 +-----+------------ + | 123456 + | 2147483647 +(2 rows) + +SELECT '' AS two, i.* FROM INT4_TBL i WHERE i.f1 > int4 '0' ORDER BY f1; + two | f1 +-----+------------ + | 123456 + | 2147483647 +(2 rows) + +SELECT '' AS three, i.* FROM INT4_TBL i WHERE i.f1 >= int2 '0' ORDER BY f1; + three | f1 +-------+------------ + | 0 + | 123456 + | 2147483647 +(3 rows) + +SELECT '' AS three, i.* FROM INT4_TBL i WHERE i.f1 >= int4 '0' ORDER BY f1; + three | f1 +-------+------------ + | 0 + | 123456 + | 2147483647 +(3 rows) + +-- positive odds +SELECT '' AS one, i.* FROM INT4_TBL i WHERE (i.f1 % int2 '2') = int2 
'1' ORDER BY f1; + one | f1 +-----+------------ + | 2147483647 +(1 row) + +-- any evens +SELECT '' AS three, i.* FROM INT4_TBL i WHERE (i.f1 % int4 '2') = int2 '0' ORDER BY f1; + three | f1 +-------+--------- + | -123456 + | 0 + | 123456 +(3 rows) + +SELECT '' AS five, i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i ORDER BY f1; +ERROR: integer out of range +SELECT '' AS five, i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i +WHERE abs(f1) < 1073741824 ORDER BY f1; + five | f1 | x +------+---------+--------- + | -123456 | -246912 + | 0 | 0 + | 123456 | 246912 +(3 rows) + +SELECT '' AS five, i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i ORDER BY f1; +ERROR: integer out of range +SELECT '' AS five, i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i +WHERE abs(f1) < 1073741824 ORDER BY f1; + five | f1 | x +------+---------+--------- + | -123456 | -246912 + | 0 | 0 + | 123456 | 246912 +(3 rows) + +SELECT '' AS five, i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i ORDER BY f1; +ERROR: integer out of range +SELECT '' AS five, i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i +WHERE f1 < 2147483646 ORDER BY f1; + five | f1 | x +------+-------------+------------- + | -2147483647 | -2147483645 + | -123456 | -123454 + | 0 | 2 + | 123456 | 123458 +(4 rows) + +SELECT '' AS five, i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i ORDER BY f1; +ERROR: integer out of range +SELECT '' AS five, i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i +WHERE f1 < 2147483646 ORDER BY f1; + five | f1 | x +------+-------------+------------- + | -2147483647 | -2147483645 + | -123456 | -123454 + | 0 | 2 + | 123456 | 123458 +(4 rows) + +SELECT '' AS five, i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i ORDER BY f1; +ERROR: integer out of range +SELECT '' AS five, i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i +WHERE f1 > -2147483647 ORDER BY f1; + five | f1 | x +------+------------+------------ + | -123456 | -123458 + | 0 | -2 + | 123456 | 123454 + | 2147483647 | 2147483645 +(4 rows) + +SELECT '' AS five, i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i ORDER BY f1; +ERROR: integer out of range +SELECT '' AS five, i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i +WHERE f1 > -2147483647 ORDER BY f1; + five | f1 | x +------+------------+------------ + | -123456 | -123458 + | 0 | -2 + | 123456 | 123454 + | 2147483647 | 2147483645 +(4 rows) + +SELECT '' AS five, i.f1, i.f1 / int2 '2' AS x FROM INT4_TBL i ORDER BY f1; + five | f1 | x +------+-------------+------------- + | -2147483647 | -1073741823 + | -123456 | -61728 + | 0 | 0 + | 123456 | 61728 + | 2147483647 | 1073741823 +(5 rows) + +SELECT '' AS five, i.f1, i.f1 / int4 '2' AS x FROM INT4_TBL i ORDER BY f1; + five | f1 | x +------+-------------+------------- + | -2147483647 | -1073741823 + | -123456 | -61728 + | 0 | 0 + | 123456 | 61728 + | 2147483647 | 1073741823 +(5 rows) + +-- +-- more complex expressions +-- +-- variations on unary minus parsing +SELECT -2+3 AS one; + one +----- + 1 +(1 row) + +SELECT 4-2 AS two; + two +----- + 2 +(1 row) + +SELECT 2- -1 AS three; + three +------- + 3 +(1 row) + +SELECT 2 - -2 AS four; + four +------ + 4 +(1 row) + +SELECT int2 '2' * int2 '2' = int2 '16' / int2 '4' AS true; + true +------ + t +(1 row) + +SELECT int4 '2' * int2 '2' = int2 '16' / int4 '4' AS true; + true +------ + t +(1 row) + +SELECT int2 '2' * int4 '2' = int4 '16' / int2 '4' AS true; + true +------ + t +(1 row) + +SELECT int4 '1000' < int4 '999' AS false; + false +------- + f +(1 row) + +SELECT 4! 
AS twenty_four; + twenty_four +------------- + 24 +(1 row) + +SELECT !!3 AS six; + six +----- + 6 +(1 row) + +SELECT 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 AS ten; + ten +----- + 10 +(1 row) + +SELECT 2 + 2 / 2 AS three; + three +------- + 3 +(1 row) + +SELECT (2 + 2) / 2 AS two; + two +----- + 2 +(1 row) + diff --git a/src/test/regress/sql/int4.sql b/src/test/regress/sql/int4.sql index 53104a0..71dd6f7 100644 --- a/src/test/regress/sql/int4.sql +++ b/src/test/regress/sql/int4.sql @@ -29,7 +29,7 @@ INSERT INTO INT4_TBL(f1) VALUES ('123 5'); INSERT INTO INT4_TBL(f1) VALUES (''); -SELECT '' AS five, * FROM INT4_TBL; +SELECT '' AS five, * FROM INT4_TBL ORDER BY f1; SELECT '' AS four, i.* FROM INT4_TBL i WHERE i.f1 <> int2 '0' ORDER BY f1; commit 405874a1f0d9b6634349bcee5165cd7e305ba81a Author: Michael P <mic...@us...> Date: Tue Mar 8 09:34:35 2011 +0900 Addition of correct output for regress test float4. The only difference between the new file and the former float4.out is that a former query of float4 test that has been modified for XC purposes by adding an ORDER BY was still in the results. diff --git a/src/test/regress/expected/float4_1.out b/src/test/regress/expected/float4_1.out new file mode 100644 index 0000000..432d159 --- /dev/null +++ b/src/test/regress/expected/float4_1.out @@ -0,0 +1,269 @@ +-- +-- FLOAT4 +-- +CREATE TABLE FLOAT4_TBL (f1 float4); +INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0'); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 '); +INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 '); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20'); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20'); +-- test for over and under flow +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); +ERROR: value out of range: overflow +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); +ERROR: value out of range: overflow +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); +ERROR: value out of range: underflow +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); +ERROR: value out of range: underflow +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); + ^ +-- bad input +INSERT INTO FLOAT4_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type real: "" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (''); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type real: " " +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' '); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz'); +ERROR: invalid input syntax for type real: "xyz" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); +ERROR: invalid input syntax for type real: "5.0.0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); +ERROR: invalid input syntax for type real: "5 . 0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0'); +ERROR: invalid input syntax for type real: "5. 0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 
0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); +ERROR: invalid input syntax for type real: " - 3.0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); +ERROR: invalid input syntax for type real: "123 5" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); + ^ +-- special inputs +SELECT 'NaN'::float4; + float4 +-------- + NaN +(1 row) + +SELECT 'nan'::float4; + float4 +-------- + NaN +(1 row) + +SELECT ' NAN '::float4; + float4 +-------- + NaN +(1 row) + +SELECT 'infinity'::float4; + float4 +---------- + Infinity +(1 row) + +SELECT ' -INFINiTY '::float4; + float4 +----------- + -Infinity +(1 row) + +-- bad special inputs +SELECT 'N A N'::float4; +ERROR: invalid input syntax for type real: "N A N" +LINE 1: SELECT 'N A N'::float4; + ^ +SELECT 'NaN x'::float4; +ERROR: invalid input syntax for type real: "NaN x" +LINE 1: SELECT 'NaN x'::float4; + ^ +SELECT ' INFINITY x'::float4; +ERROR: invalid input syntax for type real: " INFINITY x" +LINE 1: SELECT ' INFINITY x'::float4; + ^ +SELECT 'Infinity'::float4 + 100.0; + ?column? +---------- + Infinity +(1 row) + +SELECT 'Infinity'::float4 / 'Infinity'::float4; + ?column? +---------- + NaN +(1 row) + +SELECT 'nan'::float4 / 'nan'::float4; + ?column? +---------- + NaN +(1 row) + +SELECT 'nan'::numeric::float4; + float4 +-------- + NaN +(1 row) + +SELECT '' AS five, * FROM FLOAT4_TBL; + five | f1 +------+------------- + | 1004.3 + | 1.23457e+20 + | 0 + | -34.84 + | 1.23457e-20 +(5 rows) + +SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; + five | f1 +------+------------- + | -34.84 + | 0 + | 1.23457e-20 + | 1004.3 + | 1.23457e+20 +(5 rows) + +SELECT '' AS four, f.* FROM FLOAT4_TBL f WHERE f.f1 <> '1004.3' ORDER BY f1; + four | f1 +------+------------- + | -34.84 + | 0 + | 1.23457e-20 + | 1.23457e+20 +(4 rows) + +SELECT '' AS one, f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3'; + one | f1 +-----+-------- + | 1004.3 +(1 row) + +SELECT '' AS three, f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1 ORDER BY f1; + three | f1 +-------+------------- + | -34.84 + | 0 + | 1.23457e-20 +(3 rows) + +SELECT '' AS three, f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3' ORDER BY f1; + three | f1 +-------+------------- + | -34.84 + | 0 + | 1.23457e-20 +(3 rows) + +SELECT '' AS four, f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1 ORDER BY f1; + four | f1 +------+------------- + | -34.84 + | 0 + | 1.23457e-20 + | 1004.3 +(4 rows) + +SELECT '' AS four, f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3' ORDER BY f1; + four | f1 +------+------------- + | -34.84 + | 0 + | 1.23457e-20 + | 1004.3 +(4 rows) + +SELECT '' AS three, f.f1, f.f1 * '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+-------------+-------------- + | 1.23457e-20 | -1.23457e-19 + | 1004.3 | -10043 + | 1.23457e+20 | -1.23457e+21 +(3 rows) + +SELECT '' AS three, f.f1, f.f1 + '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+-------------+------------- + | 1.23457e-20 | -10 + | 1004.3 | 994.3 + | 1.23457e+20 | 1.23457e+20 +(3 rows) + +SELECT '' AS three, f.f1, f.f1 / '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+-------------+-------------- + | 1.23457e-20 | -1.23457e-21 + | 1004.3 | -100.43 + | 1.23457e+20 | -1.23457e+19 +(3 rows) + +SELECT '' AS three, f.f1, f.f1 - '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0' ORDER BY f1; + three | f1 | x +-------+-------------+------------- + | 1.23457e-20 | 10 + | 1004.3 | 1014.3 + | 1.23457e+20 | 
1.23457e+20 +(3 rows) + +-- test divide by zero +SELECT '' AS bad, f.f1 / '0.0' from FLOAT4_TBL f; +ERROR: division by zero +SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; + five | f1 +------+------------- + | -34.84 + | 0 + | 1.23457e-20 + | 1004.3 + | 1.23457e+20 +(5 rows) + +-- test the unary float4abs operator +SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f ORDER BY f1; + five | f1 | abs_f1 +------+-------------+------------- + | -34.84 | 34.84 + | 0 | 0 + | 1.23457e-20 | 1.23457e-20 + | 1004.3 | 1004.3 + | 1.23457e+20 | 1.23457e+20 +(5 rows) + +UPDATE FLOAT4_TBL + SET f1 = FLOAT4_TBL.f1 * '-1' + WHERE FLOAT4_TBL.f1 > '0.0'; +SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1; + five | f1 +------+-------------- + | -1.23457e+20 + | -1004.3 + | -34.84 + | -1.23457e-20 + | 0 +(5 rows) + ----------------------------------------------------------------------- Summary of changes: .../regress/expected/{float4.out => float4_1.out} | 10 ++++++++++ .../regress/expected/{float8.out => float8_1.out} | 9 --------- src/test/regress/expected/{int4.out => int4_1.out} | 6 +++--- src/test/regress/sql/float4.sql | 2 -- src/test/regress/sql/int4.sql | 2 +- 5 files changed, 14 insertions(+), 15 deletions(-) copy src/test/regress/expected/{float4.out => float4_1.out} (97%) copy src/test/regress/expected/{float8.out => float8_1.out} (98%) copy src/test/regress/expected/{int4.out => int4_1.out} (99%) hooks/post-receive -- Postgres-XC |
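A recurring theme in these regression fixes is that a distributed table returns its rows in whichever order the datanodes happen to answer, so any test query whose expected output depends on row order gets an explicit ORDER BY. A minimal sketch of the pattern, using the two float4 queries already shown in the diff above:

    -- Row order depends on which datanode replies first, so the output is unstable:
    SELECT '' AS five, * FROM FLOAT4_TBL;
    -- Adding an explicit sort makes the output deterministic and diff-able:
    SELECT '' AS five, * FROM FLOAT4_TBL ORDER BY f1;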
From: Michael P. <mic...@us...> - 2011-03-08 00:07:27
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated
       via  94b048a92d21bd787294cabd3a144cf69badd419 (commit)
      from  acef8b6dde2c598a727e2eaf2cca5b66db0b592f (commit)

- Log -----------------------------------------------------------------
commit 94b048a92d21bd787294cabd3a144cf69badd419
Author: Michael P <mic...@us...>
Date:   Tue Mar 8 09:06:05 2011 +0900

    Change back table defalt type to distributed.

    This is done for performance purposes.

diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index ea20e66..4737067 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -879,9 +879,28 @@ AddRelationDistribution (Oid relid,
         {
             /*
              * If no distribution was specified, and we have not chosen
-             * distribute table by replication.
+             * one based on primary key or foreign key, use first column with
+             * a supported data type.
              */
-            locatortype = LOCATOR_TYPE_REPLICATED;
+            Form_pg_attribute attr;
+            int i;
+
+            locatortype = LOCATOR_TYPE_HASH;
+
+            for (i = 0; i < descriptor->natts; i++)
+            {
+                attr = descriptor->attrs[i];
+                if (IsHashDistributable(attr->atttypid))
+                {
+                    /* distribute on this column */
+                    attnum = i + 1;
+                    break;
+                }
+            }
+
+            /* If we did not find a usable type, fall back to round robin */
+            if (attnum == 0)
+                locatortype = LOCATOR_TYPE_RROBIN;
         }
     }
     else

-----------------------------------------------------------------------

Summary of changes:
 src/backend/catalog/heap.c |   23 +++++++++++++++++++++--
 1 files changed, 21 insertions(+), 2 deletions(-)

hooks/post-receive
--
Postgres-XC
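In user terms, this restores the earlier default: a CREATE TABLE with no DISTRIBUTE BY clause is hash-distributed on its first hash-distributable column, falling back to round robin if no column qualifies, instead of being replicated. A rough sketch, with hypothetical table and column names and assuming XC's existing DISTRIBUTE BY syntax:

    -- No DISTRIBUTE BY clause: expected to behave like DISTRIBUTE BY HASH(sensor_id),
    -- since sensor_id is the first column with a hash-distributable type.
    CREATE TABLE measurements (sensor_id int, reading float8);

    -- Replication is still available when asked for explicitly.
    CREATE TABLE code_labels (code int, label text) DISTRIBUTE BY REPLICATION;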
From: Abbas B. <ga...@us...> - 2011-03-07 17:11:47
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated
       via  acef8b6dde2c598a727e2eaf2cca5b66db0b592f (commit)
      from  df5e5dc0ea901385e7be446ca6d9ebec7e58cb12 (commit)

- Log -----------------------------------------------------------------
commit acef8b6dde2c598a727e2eaf2cca5b66db0b592f
Author: Abbas <abb...@en...>
Date:   Mon Mar 7 22:11:14 2011 +0500

    To fix a server crash in aggregates as reported in ID 3125430

diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c
index 8a553d4..af2f80c 100644
--- a/src/backend/pgxc/pool/execRemote.c
+++ b/src/backend/pgxc/pool/execRemote.c
@@ -3550,7 +3550,6 @@ ExecRemoteQuery(RemoteQueryState *node)
     TupleTableSlot *scanslot = node->ss.ss_ScanTupleSlot;
     bool have_tuple = false;
 
-
     if (!node->query_Done)
     {
         /*
@@ -3677,7 +3676,22 @@ handle_results:
          */
         if (node->simple_aggregates)
         {
+            int i, natts;
+
             finish_simple_aggregates(node, resultslot);
+
+            /*
+             * PGXCTODO :In fact exec_simple_aggregates & finish_simple_aggregates
+             * should not be resulting in a TupleTableSlot with NULL pointer in
+             * per attribute value, but for now to fix the crash this check would do
+             */
+            natts = resultslot->tts_tupleDescriptor->natts;
+            for (i = 0; i < natts; ++i)
+            {
+                if (resultslot->tts_values[i] == NULL)
+                    return NULL;
+            }
+
             if (!TupIsNull(resultslot))
                 have_tuple = true;
         }

-----------------------------------------------------------------------

Summary of changes:
 src/backend/pgxc/pool/execRemote.c |   16 +++++++++++++++-
 1 files changed, 15 insertions(+), 1 deletions(-)

hooks/post-receive
--
Postgres-XC
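For readers who have not looked at tracker ID 3125430, the code path being patched is the one where a simple aggregate is finished on the Coordinator from results shipped back by the datanodes. The kind of statement that exercises it can be sketched as follows; the table name is hypothetical, and this is not necessarily the exact reproducer from the bug report:

    CREATE TABLE sales (region_id int, amount numeric) DISTRIBUTE BY HASH(region_id);
    -- A "simple aggregate" handled in ExecRemoteQuery(); with the patch, a result slot
    -- whose per-attribute values contain NULL pointers no longer crashes the server.
    SELECT max(amount) FROM sales;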
From: Abbas B. <ga...@us...> - 2011-03-07 15:01:22
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated
       via  df5e5dc0ea901385e7be446ca6d9ebec7e58cb12 (commit)
      from  93781551114d9fe5826459caaf8aa03d63bac001 (commit)

- Log -----------------------------------------------------------------
commit df5e5dc0ea901385e7be446ca6d9ebec7e58cb12
Author: Abbas <abb...@en...>
Date:   Mon Mar 7 20:00:38 2011 +0500

    To avoid a crash caused by an insert select statement in vacuum.sql

diff --git a/src/backend/pgxc/plan/planner.c b/src/backend/pgxc/plan/planner.c
index d3aaaa4..378d9b4 100644
--- a/src/backend/pgxc/plan/planner.c
+++ b/src/backend/pgxc/plan/planner.c
@@ -603,9 +603,14 @@ get_plan_nodes_insert(PlannerInfo *root, RemoteQuery *step)
                         (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
                         (errmsg("Could not find relation for oid = %d", rte->relid))));
 
-                if ((strcmp(col_base->colname, source_rel_loc_info->partAttrName) == 0) &&
-                    ( (source_rel_loc_info->locatorType == LOCATOR_TYPE_HASH) ||
-                      (source_rel_loc_info->locatorType == LOCATOR_TYPE_MODULO) ))
+                if ( col_base->colname != NULL &&
+                     source_rel_loc_info->partAttrName != NULL &&
+                     strcmp(col_base->colname, source_rel_loc_info->partAttrName) == 0 &&
+                     (
+                        source_rel_loc_info->locatorType == LOCATOR_TYPE_HASH ||
+                        source_rel_loc_info->locatorType == LOCATOR_TYPE_MODULO
+                     )
+                   )
                 {
                     /*
                      * Partition columns match, we have a "single-step INSERT SELECT".

-----------------------------------------------------------------------

Summary of changes:
 src/backend/pgxc/plan/planner.c |   11 ++++++++---
 1 files changed, 8 insertions(+), 3 deletions(-)

hooks/post-receive
--
Postgres-XC
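The branch being guarded here is the one that lets the planner ship an INSERT ... SELECT to the datanodes in a single step when source and target are partitioned on the same column. A hypothetical pair of tables that would take that path (names invented for illustration):

    CREATE TABLE orders_2010 (customer_id int, amount numeric) DISTRIBUTE BY HASH(customer_id);
    CREATE TABLE orders_all  (customer_id int, amount numeric) DISTRIBUTE BY HASH(customer_id);

    -- Both tables are partitioned on customer_id, so this can run as a
    -- "single-step INSERT SELECT" pushed down to each datanode.
    INSERT INTO orders_all SELECT customer_id, amount FROM orders_2010;

The added NULL checks simply skip that comparison when either the select-list column name or the source table's partition attribute is not set, which appears to be what the statement in vacuum.sql ran into.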
From: Abbas B. <ga...@us...> - 2011-03-07 11:25:45
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated
       via  93781551114d9fe5826459caaf8aa03d63bac001 (commit)
      from  fa427af446193741501c72e6e026ac493a125137 (commit)

- Log -----------------------------------------------------------------
commit 93781551114d9fe5826459caaf8aa03d63bac001
Author: Abbas <abb...@en...>
Date:   Mon Mar 7 15:42:27 2011 +0500

    Block creation of concurrent indices

diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index bf122db..2bb2200 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -1057,6 +1057,16 @@ standard_ProcessUtility(Node *parsetree,
             {
                 IndexStmt  *stmt = (IndexStmt *) parsetree;
 
+#ifdef PGXC
+                if (stmt->concurrent)
+                {
+                    ereport(ERROR,
+                            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                             errmsg("PGXC does not support concurrent indexes"),
+                             errdetail("The feature is not currently supported")));
+                }
+#endif
+
                 if (stmt->concurrent)
                     PreventTransactionChain(isTopLevel,
                                             "CREATE INDEX CONCURRENTLY");
@@ -1087,7 +1097,7 @@ standard_ProcessUtility(Node *parsetree,
                             false,      /* quiet */
                             stmt->concurrent);  /* concurrent */
 #ifdef PGXC
-                if (IS_PGXC_COORDINATOR && !stmt->isconstraint)
+                if (IS_PGXC_COORDINATOR && !stmt->isconstraint && !IsConnFromCoord())
                     ExecUtilityStmtOnNodes(queryString, NULL,
                                            stmt->concurrent, EXEC_ON_ALL_NODES);
 #endif

-----------------------------------------------------------------------

Summary of changes:
 src/backend/tcop/utility.c |   12 +++++++++++-
 1 files changed, 11 insertions(+), 1 deletions(-)

hooks/post-receive
--
Postgres-XC
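From a client session the restriction should look roughly like this; the table and index names are made up, while the error and detail text come straight from the ereport() added above:

    CREATE TABLE events (id int, created_at timestamp) DISTRIBUTE BY HASH(id);

    -- The ordinary form is still built and dispatched to all nodes.
    CREATE INDEX events_created_idx ON events (created_at);

    -- The concurrent form is now rejected before any work is done.
    CREATE INDEX CONCURRENTLY events_created_idx2 ON events (created_at);
    ERROR:  PGXC does not support concurrent indexes
    DETAIL:  The feature is not currently supported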
From: Michael P. <mic...@us...> - 2011-03-07 07:15:58
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated via fa427af446193741501c72e6e026ac493a125137 (commit) via a6a956e4d1303161750161c0629adb1b4514d345 (commit) via 33342d8d44bb2c36239712d9e7318df0776ce018 (commit) from 9ecde28ad6b6ed00b7588928480bf0bd8a741421 (commit) - Log ----------------------------------------------------------------- commit fa427af446193741501c72e6e026ac493a125137 Author: Michael P <mic...@us...> Date: Mon Mar 7 16:09:54 2011 +0900 CREATE TABLE default distribution to REPLICATED Change distribution type of table to REPLICATED when no distribution is specified like with: CREATE TABLE aa (int a); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 59231a8..ea20e66 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -874,34 +874,17 @@ AddRelationDistribution (Oid relid, errmsg("Invalid parent table distribution type"))); break; } - } else + } + else { /* * If no distribution was specified, and we have not chosen - * one based on primary key or foreign key, use first column with - * a supported data type. + * distribute table by replication. */ - Form_pg_attribute attr; - int i; - - locatortype = LOCATOR_TYPE_HASH; - - for (i = 0; i < descriptor->natts; i++) - { - attr = descriptor->attrs[i]; - if (IsHashDistributable(attr->atttypid)) - { - /* distribute on this column */ - attnum = i + 1; - break; - } - } - - /* If we did not find a usable type, fall back to round robin */ - if (attnum == 0) - locatortype = LOCATOR_TYPE_RROBIN; + locatortype = LOCATOR_TYPE_REPLICATED; } - } else + } + else { /* * User specified distribution type commit a6a956e4d1303161750161c0629adb1b4514d345 Author: Michael P <mic...@us...> Date: Mon Mar 7 16:00:16 2011 +0900 Block PREPARE and EXECUTE for the time being It has been noticed that PREPARE crashed the server in some pg_regress test cases. diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 1240614..bf122db 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -804,11 +804,21 @@ standard_ProcessUtility(Node *parsetree, break; case T_PrepareStmt: +#ifdef PGXC + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("PREPARE is not supported"))); +#endif CheckRestrictedOperation("PREPARE"); PrepareQuery((PrepareStmt *) parsetree, queryString); break; case T_ExecuteStmt: +#ifdef PGXC + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("EXECUTE is not supported"))); +#endif ExecuteQuery((ExecuteStmt *) parsetree, queryString, params, dest, completionTag); break; commit 33342d8d44bb2c36239712d9e7318df0776ce018 Author: Michael P <mic...@us...> Date: Mon Mar 7 15:58:13 2011 +0900 Fix for CREATE INDEX CONCURRENTLY When an index is created concurrently, Coordinator tried to take a snapshot that had already been deleted. We need to take a new one from Coordinator. Patch written by Abbas Butt diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index a735fc3..8140321 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -34,7 +34,9 @@ #include "utils/resowner.h" #include "utils/snapmgr.h" #include "utils/tqual.h" - +#ifdef PGXC +#include "pgxc/pgxc.h" +#endif /* * CurrentSnapshot points to the only snapshot taken in a serializable @@ -344,6 +346,14 @@ PopActiveSnapshot(void) Snapshot GetActiveSnapshot(void) { +#ifdef PGXC + /* + * Check if topmost snapshot is null or not, + * if it is, a new one will be taken from GTM. 
+ */ + if (!ActiveSnapshot && IS_PGXC_COORDINATOR && !IsConnFromCoord()) + return NULL; +#endif Assert(ActiveSnapshot != NULL); return ActiveSnapshot->as_snap; ----------------------------------------------------------------------- Summary of changes: src/backend/catalog/heap.c | 29 ++++++----------------------- src/backend/tcop/utility.c | 10 ++++++++++ src/backend/utils/time/snapmgr.c | 12 +++++++++++- 3 files changed, 27 insertions(+), 24 deletions(-) hooks/post-receive -- Postgres-XC |
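Of the three commits above, the PREPARE/EXECUTE block is the most visible one from a client session: both statements now fail immediately at the utility level. A minimal sketch of what a session should see (the statement name is hypothetical; the error texts are the ones added to utility.c):

    PREPARE one_row (int) AS SELECT $1;
    ERROR:  PREPARE is not supported

    EXECUTE one_row (1);
    ERROR:  EXECUTE is not supported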
From: Michael P. <mic...@us...> - 2011-03-07 00:31:45
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated
       via  9ecde28ad6b6ed00b7588928480bf0bd8a741421 (commit)
       via  fbb0afd7b8858b13dc1f1f212742ffafcf4b7d8f (commit)
      from  062ecec9ab0e36f17f31624307470b8662dcfd80 (commit)

- Log -----------------------------------------------------------------
commit 9ecde28ad6b6ed00b7588928480bf0bd8a741421
Merge: 062ecec fbb0afd
Author: Michael P <mic...@us...>
Date:   Mon Mar 7 09:31:27 2011 +0900

    Merge branch 'master' into merge_postgres_9_0_3

-----------------------------------------------------------------------

Summary of changes:
 src/backend/pgxc/pool/execRemote.c |   37 +++++++++++------------------------
 1 files changed, 12 insertions(+), 25 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-03-07 00:30:18
Project "Postgres-XC". The branch, ha_support has been updated
       via  6ee7a8fb4ca7ca60504b443da95c0c3314b1c88d (commit)
       via  fbb0afd7b8858b13dc1f1f212742ffafcf4b7d8f (commit)
      from  2876389f146b01bba0654c63e4d5eb635ffa8d8b (commit)

- Log -----------------------------------------------------------------
commit 6ee7a8fb4ca7ca60504b443da95c0c3314b1c88d
Merge: 2876389 fbb0afd
Author: Michael P <mic...@us...>
Date:   Mon Mar 7 09:30:00 2011 +0900

    Merge branch 'master' into ha_support

-----------------------------------------------------------------------

Summary of changes:
 src/backend/pgxc/pool/execRemote.c |   37 +++++++++++------------------------
 1 files changed, 12 insertions(+), 25 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-03-07 00:20:59
Project "Postgres-XC". The branch, master has been updated
       via  fbb0afd7b8858b13dc1f1f212742ffafcf4b7d8f (commit)
      from  2e8115a86a7660fbb43a1cae72b8e406c7f82054 (commit)

- Log -----------------------------------------------------------------
commit fbb0afd7b8858b13dc1f1f212742ffafcf4b7d8f
Author: Michael P <mic...@us...>
Date:   Mon Mar 7 09:17:21 2011 +0900

    Improve error handling when launching DDL or utilities

    This commit corrects a problem with error handling of ExecRemoteUtility
    in execRemote.c. When an error occurred on a Datanode during a DDL or
    utility process, connections were sent back to pooler when checking
    error message in remote state. This assumes that all the connections
    are clean. However, Coordinator connections whose messages have not
    been treated were also sent back to pooler. This could have resulted
    in data inconsistency of dirty coordinator-coordinator connections.

    Patch written by Benny Wang.

diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c
index dc1d68c..711cd1f 100644
--- a/src/backend/pgxc/pool/execRemote.c
+++ b/src/backend/pgxc/pool/execRemote.c
@@ -4242,19 +4242,6 @@ ExecRemoteUtility(RemoteQuery *node)
                 }
             }
         }
-
-        /*
-         * We have processed all responses from the data nodes and if we have
-         * error message pending we can report it. All connections should be in
-         * consistent state now and can be released to the pool after rollback.
-         */
-        if (remotestate->errorMessage)
-        {
-            char *code = remotestate->errorCode;
-            ereport(ERROR,
-                    (errcode(MAKE_SQLSTATE(code[0], code[1], code[2], code[3], code[4])),
-                     errmsg("%s", remotestate->errorMessage)));
-        }
     }
 
     /* Make the same for Coordinators */
@@ -4295,18 +4282,18 @@ ExecRemoteUtility(RemoteQuery *node)
                 }
             }
         }
-        /*
-         * We have processed all responses from the data nodes and if we have
-         * error message pending we can report it. All connections should be in
-         * consistent state now and can be released to the pool after rollback.
-         */
-        if (remotestate->errorMessage)
-        {
-            char *code = remotestate->errorCode;
-            ereport(ERROR,
-                    (errcode(MAKE_SQLSTATE(code[0], code[1], code[2], code[3], code[4])),
-                     errmsg("%s", remotestate->errorMessage)));
-        }
+    }
+    /*
+     * We have processed all responses from nodes and if we have
+     * error message pending we can report it. All connections should be in
+     * consistent state now and so they can be released to the pool after ROLLBACK.
+     */
+    if (remotestate->errorMessage)
+    {
+        char *code = remotestate->errorCode;
+        ereport(ERROR,
+                (errcode(MAKE_SQLSTATE(code[0], code[1], code[2], code[3], code[4])),
+                 errmsg("%s", remotestate->errorMessage)));
+    }
 }

-----------------------------------------------------------------------

Summary of changes:
 src/backend/pgxc/pool/execRemote.c |   37 +++++++++++------------------------
 1 files changed, 12 insertions(+), 25 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Koichi S. <koi...@us...> - 2011-03-04 09:08:22
Project "Postgres-XC". The branch, ha_support has been updated
       via  2876389f146b01bba0654c63e4d5eb635ffa8d8b (commit)
      from  481225ce8dc621e258c872fed33f85fcbc4cbf31 (commit)

- Log -----------------------------------------------------------------
commit 2876389f146b01bba0654c63e4d5eb635ffa8d8b
Author: Koichi Suzuki <koi...@gm...>
Date:   Fri Mar 4 18:08:24 2011 +0900

    This is to fix the bug 3199366, to remove shared memory when
    xcm_initmember finds fatal configuration error to continue.

diff --git a/src/pgxc/xcm/xcm_initmember.c b/src/pgxc/xcm/xcm_initmember.c
index 2e71542..ffa05f1 100644
--- a/src/pgxc/xcm/xcm_initmember.c
+++ b/src/pgxc/xcm/xcm_initmember.c
@@ -505,11 +505,6 @@ int main(int argc, char *argv[])
         dump_datanodes(n_datanode, datanode);
 #endif /* DEBUG_INITMEMBER */
     }
-    /*
-     * Dump all the info if specified
-     */
-
-    /* ... TBS ... */
 
     /*
     * Calculate the total size of shared memory
@@ -607,7 +602,19 @@ int main(int argc, char *argv[])
     rv = check_components(shm_top);
     if (rv != 0)
     {
-        xcm_destroy_shm();  /* Shmem is detatched here */
+        char shmid_name[MAXPATH];
+        char shmad_name[MAXPATH];
+        struct shmid_ds buf;
+        /*
+         * Because shared memroy has been already attatched, we should not use
+         * xcm_destroy_shm();
+         */
+        rv = shmdt(shm_top);
+        shmctl(shm_id, IPC_RMID, &buf);
+        snprintf(shmid_name, MAXPATH, "%s/%s", get_xcmhome(), "shmid");
+        unlink(shmid_name);
+        snprintf(shmad_name, MAXPATH, "%s/%s", get_xcmhome(), "shmad");
+        unlink(shmad_name);
         exit(1);
     }
     shm_top->size = totalsize;

-----------------------------------------------------------------------

Summary of changes:
 src/pgxc/xcm/xcm_initmember.c |   19 +++++++++++++------
 1 files changed, 13 insertions(+), 6 deletions(-)

hooks/post-receive
--
Postgres-XC
From: Michael P. <mic...@us...> - 2011-03-04 07:19:11
|
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been updated via 062ecec9ab0e36f17f31624307470b8662dcfd80 (commit) via 2e8115a86a7660fbb43a1cae72b8e406c7f82054 (commit) from 7eb3c73d3750b900c2a8d1ce446c97f2db425ca1 (commit) - Log ----------------------------------------------------------------- commit 062ecec9ab0e36f17f31624307470b8662dcfd80 Merge: 7eb3c73 2e8115a Author: Michael P <mic...@us...> Date: Fri Mar 4 16:18:54 2011 +0900 Merge branch 'master' into merge_postgres_9_0_3 ----------------------------------------------------------------------- Summary of changes: src/gtm/common/elog.c | 5 ++++- src/gtm/main/main.c | 6 ++++++ src/gtm/proxy/proxy_main.c | 5 +++++ src/include/gtm/gtm.h | 2 ++ src/include/gtm/gtm_proxy.h | 1 + 5 files changed, 18 insertions(+), 1 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-04 07:18:31
|
Project "Postgres-XC". The branch, ha_support has been updated via 481225ce8dc621e258c872fed33f85fcbc4cbf31 (commit) via 2e8115a86a7660fbb43a1cae72b8e406c7f82054 (commit) from 29ea460de8f7e66093c7b38cd49e238aed2ba129 (commit) - Log ----------------------------------------------------------------- commit 481225ce8dc621e258c872fed33f85fcbc4cbf31 Merge: 29ea460 2e8115a Author: Michael P <mic...@us...> Date: Fri Mar 4 16:17:57 2011 +0900 Merge branch 'master' into ha_support ----------------------------------------------------------------------- Summary of changes: src/gtm/common/elog.c | 5 ++++- src/gtm/main/main.c | 6 ++++++ src/gtm/proxy/proxy_main.c | 5 +++++ src/include/gtm/gtm.h | 2 ++ src/include/gtm/gtm_proxy.h | 1 + 5 files changed, 18 insertions(+), 1 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-04 07:16:53
|
Project "Postgres-XC". The branch, master has been updated via 2e8115a86a7660fbb43a1cae72b8e406c7f82054 (commit) from 3ba29c3723ed6a31b0faa8fea0b9a7e57ca0c25d (commit) - Log ----------------------------------------------------------------- commit 2e8115a86a7660fbb43a1cae72b8e406c7f82054 Author: Michael P <mic...@us...> Date: Fri Mar 4 16:13:56 2011 +0900 Fix for bug 3199029 GTM/GTM-proxy FATAL error handling Fix to make the main thread of GTM to return a non-null value when in error state. Patch by Pavan Deolasee diff --git a/src/gtm/common/elog.c b/src/gtm/common/elog.c index cc6a9a5..3ca6355 100644 --- a/src/gtm/common/elog.c +++ b/src/gtm/common/elog.c @@ -355,7 +355,10 @@ errfinish(int dummy,...) * FATAL termination. The postmaster may or may not consider this * worthy of panic, depending on which subprocess returns it. */ - pthread_exit(NULL); + if (IsMainThread()) + exit(1); + else + pthread_exit(NULL); } if (elevel >= PANIC) diff --git a/src/gtm/main/main.c b/src/gtm/main/main.c index ea6d5d2..80f35b3 100644 --- a/src/gtm/main/main.c +++ b/src/gtm/main/main.c @@ -55,6 +55,8 @@ int GTMPortNumber; char GTMControlFile[GTM_MAX_PATH]; char *GTMDataDir; +GTM_ThreadID TopMostThreadID; + /* The socket(s) we're listening to. */ #define MAXLISTEN 64 static int ListenSocket[MAXLISTEN]; @@ -113,6 +115,7 @@ MainThreadInit() fprintf(stderr, "malloc failed: %d", errno); fflush(stdout); fflush(stderr); + exit(1); } if (SetMyThreadInfo(thrinfo)) @@ -120,8 +123,11 @@ MainThreadInit() fprintf(stderr, "SetMyThreadInfo failed: %d", errno); fflush(stdout); fflush(stderr); + exit(1); } + TopMostThreadID = pthread_self(); + return thrinfo; } diff --git a/src/gtm/proxy/proxy_main.c b/src/gtm/proxy/proxy_main.c index d892680..f1e8553 100644 --- a/src/gtm/proxy/proxy_main.c +++ b/src/gtm/proxy/proxy_main.c @@ -61,6 +61,7 @@ char *GTMServerHost; int GTMServerPortNumber; GTM_PGXCNodeId GTMProxyID = 0; +GTM_ThreadID TopMostThreadID; /* The socket(s) we're listening to. 
*/ #define MAXLISTEN 64 @@ -145,6 +146,7 @@ MainThreadInit() fprintf(stderr, "malloc failed: %d", errno); fflush(stdout); fflush(stderr); + exit(1); } if (SetMyThreadInfo(thrinfo)) @@ -152,8 +154,11 @@ MainThreadInit() fprintf(stderr, "SetMyThreadInfo failed: %d", errno); fflush(stdout); fflush(stderr); + exit(1); } + TopMostThreadID = pthread_self(); + return thrinfo; } diff --git a/src/include/gtm/gtm.h b/src/include/gtm/gtm.h index 5041b0e..91eff99 100644 --- a/src/include/gtm/gtm.h +++ b/src/include/gtm/gtm.h @@ -93,6 +93,7 @@ GTM_ThreadInfo * GTM_GetThreadInfo(GTM_ThreadID thrid); */ extern pthread_key_t threadinfo_key; extern MemoryContext TopMostMemoryContext; +extern GTM_ThreadID TopMostThreadID; #define SetMyThreadInfo(thrinfo) pthread_setspecific(threadinfo_key, (thrinfo)) #define GetMyThreadInfo ((GTM_ThreadInfo *)pthread_getspecific(threadinfo_key)) @@ -113,6 +114,7 @@ extern MemoryContext TopMostMemoryContext; GetMyThreadInfo->thr_conn->con_port : \ NULL) #define MyThreadID (GetMyThreadInfo->thr_id) +#define IsMainThread() (GetMyThreadInfo->thr_id == TopMostThreadID) #define GTM_CachedTransInfo (GetMyThreadInfo->thr_cached_txninfo) #define GTM_HaveFreeCachedTransInfo() (list_length(GTM_CachedTransInfo)) diff --git a/src/include/gtm/gtm_proxy.h b/src/include/gtm/gtm_proxy.h index 9b721e1..0031d6e 100644 --- a/src/include/gtm/gtm_proxy.h +++ b/src/include/gtm/gtm_proxy.h @@ -200,6 +200,7 @@ typedef struct GTMProxy_CommandInfo extern pthread_key_t threadinfo_key; extern MemoryContext TopMostMemoryContext; extern char *GTMLogFile; +extern GTM_ThreadID TopMostThreadID; #define SetMyThreadInfo(thrinfo) pthread_setspecific(threadinfo_key, (thrinfo)) #define GetMyThreadInfo ((GTMProxy_ThreadInfo *)pthread_getspecific(threadinfo_key)) ----------------------------------------------------------------------- Summary of changes: src/gtm/common/elog.c | 5 ++++- src/gtm/main/main.c | 6 ++++++ src/gtm/proxy/proxy_main.c | 5 +++++ src/include/gtm/gtm.h | 2 ++ src/include/gtm/gtm_proxy.h | 1 + 5 files changed, 18 insertions(+), 1 deletions(-) hooks/post-receive -- Postgres-XC |
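The core of this fix is telling the top-most (main) thread apart from worker threads when a FATAL error is raised: the main thread must terminate the whole process with a non-zero exit code, while a worker only ends itself. A minimal sketch of that pattern, assuming the hypothetical names record_main_thread and fatal_exit and using pthread_equal rather than the direct comparison in the patch, might look like this:

#include <pthread.h>
#include <stdlib.h>

static pthread_t main_thread_id;

void
record_main_thread(void)            /* call once from main() */
{
    main_thread_id = pthread_self();
}

void
fatal_exit(void)
{
    if (pthread_equal(pthread_self(), main_thread_id))
        exit(1);                    /* main thread: non-zero process exit code */
    else
        pthread_exit(NULL);         /* worker: terminate only this thread */
}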
From: Michael P. <mic...@us...> - 2011-03-02 08:17:51
|
Project "Postgres-XC". The branch, merge_postgres_9_0_3 has been created at 3ba29c3723ed6a31b0faa8fea0b9a7e57ca0c25d (commit) - Log ----------------------------------------------------------------- ----------------------------------------------------------------------- hooks/post-receive -- Postgres-XC |
From: Koichi S. <koi...@us...> - 2011-03-02 03:58:48
|
Project "Postgres-XC". The branch, ha_support has been updated via 29ea460de8f7e66093c7b38cd49e238aed2ba129 (commit) via b0075708ecefd2a5aadbc2ed6d52ea7172ce1a34 (commit) from 0cb897c8fb3c0948ab75951f33d8d105d25d0912 (commit) - Log ----------------------------------------------------------------- commit 29ea460de8f7e66093c7b38cd49e238aed2ba129 Merge: b007570 0cb897c Author: Koichi Suzuki <koi...@gm...> Date: Wed Mar 2 13:00:49 2011 +0900 Merge branch 'ha_support' of ssh://postgres-xc.git.sourceforge.net/gitroot/postgres-xc/postgres-xc into ha_support commit b0075708ecefd2a5aadbc2ed6d52ea7172ce1a34 Author: Koichi Suzuki <koi...@gm...> Date: Wed Mar 2 12:53:16 2011 +0900 This commit is to fix two issues. 1. Changed assigment of xcm_xxxx() return value so that caller can check error by testing if the returned value is negative. 2. Changed the use of random number to determine the delay time when spin lock was not acquired. Now this is based upon nrand48. The seed and internal status is stored in a separate area so it does not influence coordinator/datanode. diff --git a/src/include/pgxc/xcm/node_membership_struct.h b/src/include/pgxc/xcm/node_membership_struct.h index a39ee3d..baefa00 100644 --- a/src/include/pgxc/xcm/node_membership_struct.h +++ b/src/include/pgxc/xcm/node_membership_struct.h @@ -416,14 +416,14 @@ typedef struct xcm_fault_comps { * of such component, these calls will return XCM_ERR_STATUS. */ #define XCM_OK 0 /* Successful */ +#define XCM_OTHER_UPDATE 1 /* Shared memory updated for some reason */ #define XCM_ERR_NOT_INITIALIZED -1 /* Shared memory not built yet */ #define XCM_ERR_OUT_OF_RANGE -2 /* Specified component does not exist */ #define XCM_ERR_COMP_FAILED -3 /* Specified component is not running */ -#define XCM_OTHER_UPDATE -4 /* Shared memory updated for some reason */ -#define XCM_ERR_MISC -5 /* Other system call error */ -#define XCM_ERR_PARM -6 /* Invalid parameters specified */ -#define XCM_ERR_STATUS -7 /* Cluster status cannot accept the operation */ -#define XCM_NOT_FOUND -8 /* Indicates that the specified object not found */ +#define XCM_ERR_MISC -4 /* Other system call error */ +#define XCM_ERR_PARM -5 /* Invalid parameters specified */ +#define XCM_ERR_STATUS -6 /* Cluster status cannot accept the operation */ +#define XCM_NOT_FOUND -7 /* Indicates that the specified object not found */ /* * Misc. magic numbers/values. diff --git a/src/pgxc/xcm/node_membership.c b/src/pgxc/xcm/node_membership.c index 18a6f5f..aac05db 100644 --- a/src/pgxc/xcm/node_membership.c +++ b/src/pgxc/xcm/node_membership.c @@ -96,6 +96,7 @@ static int check_init(void); static xcm_components *init_shm(void); static xcm_connPoint *copy_connPoints(xcm_connPoint *conn, int n_conn); static xcm_fault_comps *alloc_fault_comps(int num_comps); +static void init_random(void); /* * Internal one ... comes from @@ -194,13 +195,39 @@ int xcm_destroy_shm() /* * Check if the shared memory is attached. If not, try to attach it. + * Also reinitializa the random number series used in the wait of slock + * using pid. */ + +/* + * This is just to be used in spinlock wait. 
+ */ +unsigned short xcm_rand_seed[3]; +#define MY_MAXSHORT (int)(0x7fff) + +/* + * Setup the seed of the random number using my PID + */ +static void init_random() +{ + pid_t mypid; + mypid = getpid(); + if (sizeof(pid_t) > 4) { + mypid &= 0x7fffffff; + } + xcm_rand_seed[0] = 0; + xcm_rand_seed[1] = mypid/MY_MAXSHORT; + xcm_rand_seed[2] = mypid%MY_MAXSHORT; +} + static int check_init() { xcm_components *checkv; - if (shm_top == NULL) + if (shm_top == NULL) { + init_random(); checkv = init_shm(); + } else return(0); if (checkv == NULL) diff --git a/src/pgxc/xcm/xcm_s_lock.c b/src/pgxc/xcm/xcm_s_lock.c index 5ccf1df..eea9ab2 100644 --- a/src/pgxc/xcm/xcm_s_lock.c +++ b/src/pgxc/xcm/xcm_s_lock.c @@ -66,6 +66,12 @@ xcm_s_lock_stuck(volatile xcm_slock_t *lock, const char *file, int line) /* * xcm_s_lock(lock) - platform-independent portion of waiting for a spinlock. */ +/* + * Seed for the random number used to determine wait time. + * Defined in node_membership.c. + */ +extern unsigned short *xcm_rand_seed; + void xcm_s_lock(volatile xcm_slock_t *lock, const char *file, int line) { @@ -140,7 +146,7 @@ xcm_s_lock(volatile xcm_slock_t *lock, const char *file, int line) /* increase delay by a random fraction between 1X and 2X */ cur_delay += (int) (cur_delay * - ((double) random() / (double) MAX_RANDOM_VALUE) + 0.5); + ((double) nrand48(xcm_rand_seed) / (double) MAX_RANDOM_VALUE) + 0.5); /* wrap back to minimum delay when max is exceeded */ if (cur_delay > MAX_DELAY_MSEC) cur_delay = MIN_DELAY_MSEC; ----------------------------------------------------------------------- Summary of changes: src/include/pgxc/xcm/node_membership_struct.h | 10 ++++---- src/pgxc/xcm/node_membership.c | 29 ++++++++++++++++++++++++- src/pgxc/xcm/xcm_s_lock.c | 8 ++++++- 3 files changed, 40 insertions(+), 7 deletions(-) hooks/post-receive -- Postgres-XC |
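The spin-lock change keeps a private 48-bit state for nrand48() so the backoff randomness never disturbs the random() stream used elsewhere in the server. A self-contained sketch of that idea, with assumed names (backoff_seed, next_spin_delay_ms) and assumed delay bounds, could read:

#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

#define MIN_DELAY_MS  1
#define MAX_DELAY_MS  1000
#define NRAND48_MAX   2147483647.0    /* nrand48() returns values in [0, 2^31) */

static unsigned short backoff_seed[3];

/* Seed the private state from the process id, once per process. */
void
init_backoff_seed(void)
{
    pid_t pid = getpid();

    backoff_seed[0] = 0;
    backoff_seed[1] = (unsigned short) (pid >> 15);
    backoff_seed[2] = (unsigned short) (pid & 0x7fff);
}

/* Grow the delay by a random factor between 1x and 2x, then wrap around. */
int
next_spin_delay_ms(int cur_delay_ms)
{
    cur_delay_ms += (int) (cur_delay_ms * (nrand48(backoff_seed) / NRAND48_MAX) + 0.5);
    if (cur_delay_ms > MAX_DELAY_MS)
        cur_delay_ms = MIN_DELAY_MS;
    return cur_delay_ms;
}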
From: Michael P. <mic...@us...> - 2011-03-02 03:01:52
|
Project "Postgres-XC". The branch, ha_support has been updated via 0cb897c8fb3c0948ab75951f33d8d105d25d0912 (commit) from c2fdec2ef8f70e118bd66d26996689190268845d (commit) - Log ----------------------------------------------------------------- commit 0cb897c8fb3c0948ab75951f33d8d105d25d0912 Author: Michael P <mic...@us...> Date: Wed Mar 2 11:59:31 2011 +0900 Support mirror numbering in 2PC Data for pg_prepared_xact() function This support was still lacking because the node list was set with the list of global IDs used on pooler. Node list is written with the following format: Dn1/Mir1,Dn1/Mir2,..,DnN/MirN Ex: 1/1,1/2,3/1 diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c index 809d08f..af70052 100644 --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@ -4640,13 +4640,26 @@ PGXCNode_Build2PCData(bool isimplicit) datanodes = collect_pgxcnode_numbers(dn_conn_count, pgxc_handles->datanode_handles, REMOTE_CONN_DATANODE); - sprintf(buffer, "%d", datanodes[0]); - - for (i = 1; i < dn_conn_count; i++) + if (IsPGXCMirrorMode) { - sprintf(buffer, "%s,%d", buffer, datanodes[i]); + int datanode_id, mirror_id; + + datanode_id = PGXCMirror_GetMirrorIDAndDatanodeID(datanodes[0], &mirror_id); + sprintf(buffer, "%d/%d", datanode_id, mirror_id); + + for (i = 1; i < dn_conn_count; i++) + { + datanode_id = PGXCMirror_GetMirrorIDAndDatanodeID(datanodes[i], &mirror_id); + sprintf(buffer, "%s,%d/%d", buffer, datanode_id, mirror_id); + } } + else + { + sprintf(buffer, "%d", datanodes[0]); + for (i = 1; i < dn_conn_count; i++) + sprintf(buffer, "%s,%d", buffer, datanodes[i]); + } PGXC_2PCData->nodelist = (char *) malloc(strlen(buffer) + 1); memcpy(PGXC_2PCData->nodelist, buffer, strlen(buffer) + 1); } ----------------------------------------------------------------------- Summary of changes: src/backend/pgxc/pool/execRemote.c | 21 +++++++++++++++++---- 1 files changed, 17 insertions(+), 4 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-02 00:57:30
|
Project "Postgres-XC". The branch, ha_support has been updated via c2fdec2ef8f70e118bd66d26996689190268845d (commit) via 3ba29c3723ed6a31b0faa8fea0b9a7e57ca0c25d (commit) from 482825deb075f295fc349fa3bb3fda8ca46d92df (commit) - Log ----------------------------------------------------------------- commit c2fdec2ef8f70e118bd66d26996689190268845d Merge: 482825d 3ba29c3 Author: Michael P <mic...@us...> Date: Wed Mar 2 10:02:06 2011 +0900 Merge branch 'master' into ha_support diff --cc src/backend/access/transam/xact.c index 082431c,4ef1edd..1f3c09f --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@@ -2092,11 -2083,9 +2091,11 @@@ CommitTransaction(bool contact_gtm * an application can use this special prepare. * If PrepareTransaction is called during an implicit 2PC, do not release ressources, * this is made by CommitTransaction when transaction has been committed on Nodes. + * + * In case of an explicit 2PC, 2PC Data of PGXC is set when PGXCNodePrepare is called. */ static void - PrepareTransaction(bool write_2pc_file, bool is_implicit) + PrepareTransaction(bool is_implicit) #else static void PrepareTransaction(void) diff --cc src/include/access/twophase.h index 06b219c,94df1ad..4a5e712 --- a/src/include/access/twophase.h +++ b/src/include/access/twophase.h @@@ -48,15 -37,8 +48,9 @@@ extern GlobalTransaction MarkAsPreparin extern GlobalTransaction MarkAsPreparing(TransactionId xid, const char *gid, TimestampTz prepared_at, Oid owner, Oid databaseid); +#endif - #ifdef PGXC - extern void RemoveGXactCoord(GlobalTransaction gxact); - extern void EndPrepare(GlobalTransaction gxact, bool write_2pc_file); - #else extern void EndPrepare(GlobalTransaction gxact); - #endif - extern void StartPrepare(GlobalTransaction gxact); extern TransactionId PrescanPreparedTransactions(void); ----------------------------------------------------------------------- Summary of changes: src/backend/access/transam/twophase.c | 40 ------- src/backend/access/transam/xact.c | 184 ++++++++++++++++----------------- src/backend/tcop/utility.c | 25 ++++- src/include/access/twophase.h | 6 - src/include/access/xact.h | 6 +- 5 files changed, 112 insertions(+), 149 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-02 00:47:59
|
Project "Postgres-XC". The branch, master has been updated via 3ba29c3723ed6a31b0faa8fea0b9a7e57ca0c25d (commit) from 51cabe99e820989af4634ef7113a294563e8820c (commit) - Log ----------------------------------------------------------------- commit 3ba29c3723ed6a31b0faa8fea0b9a7e57ca0c25d Author: Michael P <mic...@us...> Date: Wed Mar 2 09:46:21 2011 +0900 Fix for bug 3134395, 3086422, 3136230: 2PC locks When a PREPARE was being made on Coordinator for a transaction not using DDL, 2PC file was bypassed so as to make a fake COMMIT on Coordinator. The problem was that locks hold at PREPARE state were not released at COMMIT PREPARED on Coordinator because lock information was obtained from 2PC, which indeed did not exist on Coordinator for non-DDL transactions. With this fix, instead of a fake PREPARE, XC does a Commit on Coordinator without contacting GTM, this permits to release correctly all the locks held by transaction. PrepareTransaction is simplified in xact.c, but CommitTransaction needs an additional parameter to decide if GTM is called at COMMIT or not. diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index af15e79..1531d44 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -892,13 +892,8 @@ StartPrepare(GlobalTransaction gxact) * * Calculates CRC and writes state file to WAL and in pg_twophase directory. */ -#ifdef PGXC -void -EndPrepare(GlobalTransaction gxact, bool write_2pc_file) -#else void EndPrepare(GlobalTransaction gxact) -#endif { TransactionId xid = gxact->proc.xid; TwoPhaseFileHeader *hdr; @@ -935,11 +930,6 @@ EndPrepare(GlobalTransaction gxact) * PANIC anyway. */ -#ifdef PGXC - /* Write 2PC state file on Coordinator side if a DDL is involved in transaction */ - if (write_2pc_file) - { -#endif TwoPhaseFilePath(path, xid); fd = BasicOpenFile(path, @@ -1012,9 +1002,6 @@ EndPrepare(GlobalTransaction gxact) * We save the PREPARE record's location in the gxact for later use by * CheckPointTwoPhase. */ -#ifdef PGXC - } -#endif START_CRIT_SECTION(); @@ -1026,15 +1013,6 @@ EndPrepare(GlobalTransaction gxact) /* If we crash now, we have prepared: WAL replay will fix things */ -#ifdef PGXC - /* - * Just write 2PC state file on Datanodes - * or on Coordinators if DDL queries are involved. - */ - if (write_2pc_file) - { -#endif - /* write correct CRC and close file */ if ((write(fd, &statefile_crc, sizeof(pg_crc32))) != sizeof(pg_crc32)) { @@ -1049,10 +1027,6 @@ EndPrepare(GlobalTransaction gxact) (errcode_for_file_access(), errmsg("could not close two-phase state file: %m"))); -#ifdef PGXC - } -#endif - /* * Mark the prepared transaction as valid. 
As soon as xact.c marks MyProc * as not running our XID (which it will do immediately after this @@ -1903,17 +1877,3 @@ RecordTransactionAbortPrepared(TransactionId xid, END_CRIT_SECTION(); } - - -#ifdef PGXC -/* - * Remove a gxact on a Coordinator, - * this is used to be able to prepare a commit transaction on another coordinator than the one - * who prepared the transaction, for a transaction that does not include DDLs - */ -void -RemoveGXactCoord(GlobalTransaction gxact) -{ - RemoveGXact(gxact); -} -#endif diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 0fa83fa..4ef1edd 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -112,14 +112,13 @@ typedef enum TBlockState TBLOCK_BEGIN, /* starting transaction block */ TBLOCK_INPROGRESS, /* live transaction */ TBLOCK_END, /* COMMIT received */ +#ifdef PGXC + TBLOCK_END_NOT_GTM, /* COMMIT received but do not commit on GTM */ +#endif TBLOCK_ABORT, /* failed xact, awaiting ROLLBACK */ TBLOCK_ABORT_END, /* failed xact, ROLLBACK received */ TBLOCK_ABORT_PENDING, /* live xact, ROLLBACK received */ TBLOCK_PREPARE, /* live xact, PREPARE received */ -#ifdef PGXC - TBLOCK_PREPARE_NO_2PC_FILE, /* PREPARE receive but skip 2PC file creation - * and Commit gxact */ -#endif /* subtransaction states */ TBLOCK_SUBBEGIN, /* starting a subtransaction */ TBLOCK_SUBINPROGRESS, /* live subtransaction */ @@ -287,7 +286,7 @@ static void CallSubXactCallbacks(SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid); static void CleanupTransaction(void); -static void CommitTransaction(void); +static void CommitTransaction(bool contact_gtm); static TransactionId RecordTransactionAbort(bool isSubXact); static void StartTransaction(void); @@ -312,7 +311,7 @@ static const char *TransStateAsString(TransState state); #ifdef PGXC /* PGXC_COORD */ static GlobalTransactionId GetGlobalTransactionId(TransactionState s); -static void PrepareTransaction(bool write_2pc_file, bool is_implicit); +static void PrepareTransaction(bool is_implicit); /* ---------------------------------------------------------------- * PG-XC Functions @@ -1744,7 +1743,7 @@ StartTransaction(void) * NB: if you change this routine, better look at PrepareTransaction too! */ static void -CommitTransaction(void) +CommitTransaction(bool contact_gtm) { TransactionState s = CurrentTransactionState; TransactionId latestXid; @@ -1754,7 +1753,7 @@ CommitTransaction(void) char implicitgid[256]; TransactionId xid = GetCurrentTransactionId(); - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) + if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && contact_gtm) PreparePGXCNodes = PGXCNodeIsImplicit2PC(&PrepareLocalCoord); if (PrepareLocalCoord || PreparePGXCNodes) @@ -1768,7 +1767,7 @@ CommitTransaction(void) * If current transaction has a DDL, and involves more than 1 Coordinator, * PREPARE first on local Coordinator. */ - PrepareTransaction(true, true); + PrepareTransaction(true); } else { @@ -1865,7 +1864,7 @@ CommitTransaction(void) * * This is called only if it is not necessary to prepare the nodes. 
*/ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && !PreparePGXCNodes) + if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && !PreparePGXCNodes && contact_gtm) PGXCNodeCommit(); #endif @@ -1892,12 +1891,12 @@ CommitTransaction(void) * * Also do not commit a transaction that has already been prepared on Datanodes */ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && !PreparePGXCNodes) + if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && !PreparePGXCNodes && contact_gtm) { CommitTranGTM(s->globalTransactionId); latestXid = s->globalTransactionId; } - else if (IS_PGXC_DATANODE || IsConnFromCoord()) + else if ((IS_PGXC_DATANODE || IsConnFromCoord()) && contact_gtm) { /* If we are autovacuum, commit on GTM */ if ((IsAutoVacuumWorkerProcess() || GetForceXidFromGTM()) @@ -2086,7 +2085,7 @@ CommitTransaction(void) * this is made by CommitTransaction when transaction has been committed on Nodes. */ static void -PrepareTransaction(bool write_2pc_file, bool is_implicit) +PrepareTransaction(bool is_implicit) #else static void PrepareTransaction(void) @@ -2222,7 +2221,7 @@ PrepareTransaction(void) * updates, because the transaction manager might get confused if we lose * a global transaction. */ - EndPrepare(gxact, write_2pc_file); + EndPrepare(gxact); /* * Now we clean up backend-internal state and release internal resources. @@ -2273,17 +2272,6 @@ PrepareTransaction(void) #ifdef PGXC /* - * We want to be able to commit a prepared transaction from another coordinator, - * so clean up the gxact in shared memory also. - */ - if (!write_2pc_file) - { - RemoveGXactCoord(gxact); - } -#endif - -#ifdef PGXC - /* * In case of an implicit 2PC, ressources are released by CommitTransaction() */ if (!is_implicit) @@ -2665,6 +2653,9 @@ StartTransactionCommand(void) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT_END: case TBLOCK_SUBABORT_END: @@ -2673,9 +2664,6 @@ StartTransactionCommand(void) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(ERROR, "StartTransactionCommand: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -2714,7 +2702,11 @@ CommitTransactionCommand(void) * transaction commit, and return to the idle state. */ case TBLOCK_STARTED: +#ifdef PGXC + CommitTransaction(true); +#else CommitTransaction(); +#endif s->blockState = TBLOCK_DEFAULT; break; @@ -2743,10 +2735,20 @@ CommitTransactionCommand(void) * idle state. */ case TBLOCK_END: +#ifdef PGXC + CommitTransaction(true); +#else CommitTransaction(); +#endif s->blockState = TBLOCK_DEFAULT; break; +#ifdef PGXC + case TBLOCK_END_NOT_GTM: + CommitTransaction(false); + s->blockState = TBLOCK_DEFAULT; + break; +#endif /* * Here we are in the middle of a transaction block but one of the * commands caused an abort so we do nothing but remain in the @@ -2782,20 +2784,13 @@ CommitTransactionCommand(void) * return to the idle state. */ case TBLOCK_PREPARE: - PrepareTransaction(true, false); - s->blockState = TBLOCK_DEFAULT; - break; - #ifdef PGXC - /* - * We are complieting a PREPARE TRANSACTION for a pgxc transaction - * that involved DDLs on a Coordinator. - */ - case TBLOCK_PREPARE_NO_2PC_FILE: - PrepareTransaction(false, false); + PrepareTransaction(false); +#else + PrepareTransaction(); +#endif s->blockState = TBLOCK_DEFAULT; break; -#endif /* * We were just issued a SAVEPOINT inside a transaction block. 
@@ -2824,23 +2819,21 @@ CommitTransactionCommand(void) if (s->blockState == TBLOCK_END) { Assert(s->parent == NULL); +#ifdef PGXC + CommitTransaction(true); +#else CommitTransaction(); +#endif s->blockState = TBLOCK_DEFAULT; } else if (s->blockState == TBLOCK_PREPARE) { Assert(s->parent == NULL); - PrepareTransaction(true, false); - s->blockState = TBLOCK_DEFAULT; - } #ifdef PGXC - else if (s->blockState == TBLOCK_PREPARE_NO_2PC_FILE) - { - Assert(s->parent == NULL); - PrepareTransaction(false, false); + PrepareTransaction(false); +#endif s->blockState = TBLOCK_DEFAULT; } -#endif else { Assert(s->blockState == TBLOCK_INPROGRESS || @@ -2998,6 +2991,9 @@ AbortCurrentTransaction(void) * the transaction). */ case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif AbortTransaction(); CleanupTransaction(); s->blockState = TBLOCK_DEFAULT; @@ -3038,9 +3034,6 @@ AbortCurrentTransaction(void) * the transaction). */ case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif AbortTransaction(); CleanupTransaction(); s->blockState = TBLOCK_DEFAULT; @@ -3384,6 +3377,9 @@ BeginTransactionBlock(void) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT_END: case TBLOCK_SUBABORT_END: @@ -3392,9 +3388,6 @@ BeginTransactionBlock(void) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(FATAL, "BeginTransactionBlock: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -3413,19 +3406,18 @@ BeginTransactionBlock(void) * We do it this way because it's not convenient to change memory context, * resource owner, etc while executing inside a Portal. */ -#ifdef PGXC -bool -PrepareTransactionBlock(char *gid, bool write_2pc_file) -#else bool PrepareTransactionBlock(char *gid) -#endif { TransactionState s; bool result; /* Set up to commit the current transaction */ +#ifdef PGXC + result = EndTransactionBlock(true); +#else result = EndTransactionBlock(); +#endif /* If successful, change outer tblock state to PREPARE */ if (result) @@ -3440,16 +3432,6 @@ PrepareTransactionBlock(char *gid) /* Save GID where PrepareTransaction can find it again */ prepareGID = MemoryContextStrdup(TopTransactionContext, gid); -#ifdef PGXC - /* - * For a Postgres-XC Coordinator, prepare is done for a transaction - * if and only if a DDL was involved in the transaction. - * If not, it is enough to prepare it on Datanodes involved only. - */ - if (!write_2pc_file) - s->blockState = TBLOCK_PREPARE_NO_2PC_FILE; - else -#endif s->blockState = TBLOCK_PREPARE; } else @@ -3480,7 +3462,11 @@ PrepareTransactionBlock(char *gid) * resource owner, etc while executing inside a Portal. */ bool +#ifdef PGXC +EndTransactionBlock(bool contact_gtm) +#else EndTransactionBlock(void) +#endif { TransactionState s = CurrentTransactionState; bool result = false; @@ -3492,6 +3478,11 @@ EndTransactionBlock(void) * to COMMIT. 
*/ case TBLOCK_INPROGRESS: +#ifdef PGXC + if (!contact_gtm) + s->blockState = TBLOCK_END_NOT_GTM; + else +#endif s->blockState = TBLOCK_END; result = true; break; @@ -3570,6 +3561,9 @@ EndTransactionBlock(void) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT_END: case TBLOCK_SUBABORT_END: @@ -3578,9 +3572,6 @@ EndTransactionBlock(void) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(FATAL, "EndTransactionBlock: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -3665,6 +3656,9 @@ UserAbortTransactionBlock(void) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT_END: case TBLOCK_SUBABORT_END: @@ -3673,9 +3667,6 @@ UserAbortTransactionBlock(void) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(FATAL, "UserAbortTransactionBlock: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -3713,6 +3704,9 @@ DefineSavepoint(char *name) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT: case TBLOCK_SUBABORT: @@ -3723,9 +3717,6 @@ DefineSavepoint(char *name) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(FATAL, "DefineSavepoint: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -3772,6 +3763,9 @@ ReleaseSavepoint(List *options) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT: case TBLOCK_SUBABORT: @@ -3782,9 +3776,6 @@ ReleaseSavepoint(List *options) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(FATAL, "ReleaseSavepoint: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -3875,6 +3866,9 @@ RollbackToSavepoint(List *options) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT_END: case TBLOCK_SUBABORT_END: @@ -3883,9 +3877,6 @@ RollbackToSavepoint(List *options) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(FATAL, "RollbackToSavepoint: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -3968,10 +3959,10 @@ BeginInternalSubTransaction(char *name) case TBLOCK_STARTED: case TBLOCK_INPROGRESS: case TBLOCK_END: - case TBLOCK_PREPARE: #ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: + case TBLOCK_END_NOT_GTM: #endif + case TBLOCK_PREPARE: case TBLOCK_SUBINPROGRESS: /* Normal subtransaction start */ PushTransaction(); @@ -3984,7 +3975,6 @@ BeginInternalSubTransaction(char *name) if (name) s->name = MemoryContextStrdup(TopTransactionContext, name); break; - /* These cases are invalid. 
*/ case TBLOCK_DEFAULT: case TBLOCK_BEGIN: @@ -4055,6 +4045,9 @@ RollbackAndReleaseCurrentSubTransaction(void) case TBLOCK_SUBBEGIN: case TBLOCK_INPROGRESS: case TBLOCK_END: +#ifdef PGXC + case TBLOCK_END_NOT_GTM: +#endif case TBLOCK_SUBEND: case TBLOCK_ABORT: case TBLOCK_ABORT_END: @@ -4064,9 +4057,6 @@ RollbackAndReleaseCurrentSubTransaction(void) case TBLOCK_SUBRESTART: case TBLOCK_SUBABORT_RESTART: case TBLOCK_PREPARE: -#ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: -#endif elog(FATAL, "RollbackAndReleaseCurrentSubTransaction: unexpected state %s", BlockStateAsString(s->blockState)); break; @@ -4113,11 +4103,11 @@ AbortOutOfAnyTransaction(void) case TBLOCK_BEGIN: case TBLOCK_INPROGRESS: case TBLOCK_END: - case TBLOCK_ABORT_PENDING: - case TBLOCK_PREPARE: #ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: + case TBLOCK_END_NOT_GTM: #endif + case TBLOCK_ABORT_PENDING: + case TBLOCK_PREPARE: /* In a transaction, so clean up */ AbortTransaction(); CleanupTransaction(); @@ -4207,11 +4197,11 @@ TransactionBlockStatusCode(void) case TBLOCK_INPROGRESS: case TBLOCK_SUBINPROGRESS: case TBLOCK_END: - case TBLOCK_SUBEND: - case TBLOCK_PREPARE: #ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: + case TBLOCK_END_NOT_GTM: #endif + case TBLOCK_SUBEND: + case TBLOCK_PREPARE: return 'T'; /* in transaction */ case TBLOCK_ABORT: case TBLOCK_SUBABORT: @@ -4707,10 +4697,10 @@ BlockStateAsString(TBlockState blockState) return "ABORT END"; case TBLOCK_ABORT_PENDING: return "ABORT PEND"; + case TBLOCK_PREPARE: #ifdef PGXC - case TBLOCK_PREPARE_NO_2PC_FILE: + case TBLOCK_END_NOT_GTM: #endif - case TBLOCK_PREPARE: return "PREPARE"; case TBLOCK_SUBBEGIN: return "SUB BEGIN"; diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 90c1f81..065d880 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -335,7 +335,11 @@ ProcessUtility(Node *parsetree, break; case TRANS_STMT_COMMIT: +#ifdef PGXC + if (!EndTransactionBlock(true)) +#else if (!EndTransactionBlock()) +#endif { /* report unsuccessful commit in completionTag */ if (completionTag) @@ -361,16 +365,31 @@ ProcessUtility(Node *parsetree, if (IsConnFromCoord()) operation_local = true; - if (!PrepareTransactionBlock(stmt->gid, operation_local)) + if (operation_local) { -#else +#endif if (!PrepareTransactionBlock(stmt->gid)) { -#endif /* report unsuccessful commit in completionTag */ if (completionTag) strcpy(completionTag, "ROLLBACK"); } +#ifdef PGXC + } + else + { + /* + * In this case commit locally to erase the transaction traces + * but do not contact GTM + */ + if (!EndTransactionBlock(false)) + { + /* report unsuccessful commit in completionTag */ + if (completionTag) + strcpy(completionTag, "ROLLBACK"); + } + } +#endif break; case TRANS_STMT_COMMIT_PREPARED: diff --git a/src/include/access/twophase.h b/src/include/access/twophase.h index 7b78a6d..94df1ad 100644 --- a/src/include/access/twophase.h +++ b/src/include/access/twophase.h @@ -38,13 +38,7 @@ extern GlobalTransaction MarkAsPreparing(TransactionId xid, const char *gid, TimestampTz prepared_at, Oid owner, Oid databaseid); -#ifdef PGXC -extern void RemoveGXactCoord(GlobalTransaction gxact); -extern void EndPrepare(GlobalTransaction gxact, bool write_2pc_file); -#else extern void EndPrepare(GlobalTransaction gxact); -#endif - extern void StartPrepare(GlobalTransaction gxact); extern TransactionId PrescanPreparedTransactions(void); diff --git a/src/include/access/xact.h b/src/include/access/xact.h index 9b2be9f..e7f1089 100644 --- a/src/include/access/xact.h +++ 
b/src/include/access/xact.h @@ -172,12 +172,12 @@ extern void AbortCurrentTransactionOnce(void); #endif extern void AbortCurrentTransaction(void); extern void BeginTransactionBlock(void); -extern bool EndTransactionBlock(void); #ifdef PGXC -extern bool PrepareTransactionBlock(char *gid, bool write_2pc_file); +extern bool EndTransactionBlock(bool contact_gtm); #else -extern bool PrepareTransactionBlock(char *gid); +extern bool EndTransactionBlock(void); #endif +extern bool PrepareTransactionBlock(char *gid); extern void UserAbortTransactionBlock(void); extern void ReleaseSavepoint(List *options); extern void DefineSavepoint(char *name); ----------------------------------------------------------------------- Summary of changes: src/backend/access/transam/twophase.c | 40 ------- src/backend/access/transam/xact.c | 184 ++++++++++++++++----------------- src/backend/tcop/utility.c | 25 ++++- src/include/access/twophase.h | 6 - src/include/access/xact.h | 6 +- 5 files changed, 112 insertions(+), 149 deletions(-) hooks/post-receive -- Postgres-XC |
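The heart of this fix is the choice made when a Coordinator sees PREPARE TRANSACTION: a transaction that ran no DDL is committed locally without contacting GTM, so all of its locks are released, while a DDL transaction still goes through a real prepare. A rough sketch of that branch, reusing the patch's function names but with a placeholder flag transaction_had_ddl, is:

#include <stdbool.h>

extern bool PrepareTransactionBlock(char *gid);       /* real two-phase prepare */
extern bool EndTransactionBlock(bool contact_gtm);    /* local commit path */

static bool
handle_prepare_stmt(char *gid, bool transaction_had_ddl)
{
    if (transaction_had_ddl)
    {
        /* keep the prepared state (and its locks) until COMMIT PREPARED */
        return PrepareTransactionBlock(gid);
    }

    /*
     * No DDL ran on this Coordinator, so there is nothing to replay at
     * COMMIT PREPARED time: commit locally, release every lock, and skip
     * the round trip to GTM.
     */
    return EndTransactionBlock(false);
}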
From: Michael P. <mic...@us...> - 2011-03-01 23:27:10
|
Project "Postgres-XC". The branch, ha_support has been updated via 482825deb075f295fc349fa3bb3fda8ca46d92df (commit) from 0941c9d8ff92110c176f8c2a01011b473c2b7951 (commit) - Log ----------------------------------------------------------------- commit 482825deb075f295fc349fa3bb3fda8ca46d92df Author: Michael P <mic...@us...> Date: Wed Mar 2 08:31:27 2011 +0900 Correction of error messages for XCM interface. diff --git a/src/backend/pgxc/pool/mirror.c b/src/backend/pgxc/pool/mirror.c index b7bfbc5..9b22747 100644 --- a/src/backend/pgxc/pool/mirror.c +++ b/src/backend/pgxc/pool/mirror.c @@ -2,10 +2,10 @@ * * mirror.c * - * File containing API to interact with Fault Sync module + * File containing API to interact with XCM module * It is necessary to activate the GUC parameter mirror_mode * to call the APIs of this file. - * Only this file is authorized to call APIs of Fault Sync + * Only this file is authorized to call APIs of XCM * * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group * Portions Copyright (c) 2010-2011 Nippon Telegraph and Telephone Corporation @@ -90,7 +90,7 @@ PGXCMirror_GetPrimaryDatanodeID(void) { int primary_id = 0; /* - * Get the primary node parameters from Fault Sync module + * Get the primary node parameters from XCM module * In other cases GUC params have all the necessary data. */ if (IsXCM) @@ -98,7 +98,7 @@ PGXCMirror_GetPrimaryDatanodeID(void) if (get_xcm_primary_datanode(&primary_id) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get primary node ID"))); + errmsg("XCM ERROR: could not get primary node ID"))); } return primary_id; @@ -123,7 +123,7 @@ PGXCMirror_GetPrimaryMirrorID(void) if (get_xcm_primary_mirror(datanode_id, &mirror_id) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get primary mirror ID"))); + errmsg("XCM ERROR: could not get primary mirror ID"))); Assert(datanode_id <= NumDataNodes && datanode_id > 0); Assert(mirror_id < PGXCMirror_GetMirrorCount(datanode_id)); @@ -148,7 +148,7 @@ PGXCMirror_GetPreferredNodeID(bool is_datanode) if (get_xcm_preferred_mirror(PGXCNodeId, &datanode_id, &mirror_id) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get primary mirror ID"))); + errmsg("XCM ERROR: could not get primary mirror ID"))); Assert(datanode_id <= NumDataNodes && datanode_id > 0); Assert(mirror_id < PGXCMirror_GetMirrorCount(datanode_id)); @@ -191,7 +191,7 @@ PGXCMirror_SetMirrorCountList(void) if (get_xcm_mirror_count(i + 1, &mirror_count) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get Mirror count"))); + errmsg("XCM ERROR: could not get Mirror count"))); PGXCNodeMirrorCount[i] = mirror_count; MirrorTotalCount += PGXCNodeMirrorCount[i]; } @@ -683,7 +683,7 @@ PGXCMirror_GetLocalGTMHost(void) if (err < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get GTM information"))); + errmsg("XCM ERROR: could not get GTM information"))); /* Need Connection points for GTM or Proxy */ if (gtm_id > 0) @@ -720,7 +720,7 @@ PGXCMirror_GetLocalGTMPort(void) if (err < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get GTM information"))); + errmsg("XCM ERROR: could not get GTM information"))); /* Need Connection points for GTM or Proxy */ if (gtm_id > 0) @@ -778,7 +778,7 @@ PGXCMirror_ReportCoordFail(int pgxc_node_id) if (PGXCMirror_ReportFail(REMOTE_CONN_COORD, pgxc_node_id, 0) < 0) 
ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not report failed Coordinator"))); + errmsg("XCM ERROR: could not report failed Coordinator"))); } void @@ -790,7 +790,7 @@ PGXCMirror_ReportDataNodeFail(int pgxc_node_id, int mirror_id) if (PGXCMirror_ReportFail(REMOTE_CONN_DATANODE, pgxc_node_id, mirror_id) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not report failed Datanode"))); + errmsg("XCM ERROR: could not report failed Datanode"))); } /* @@ -824,7 +824,7 @@ report_error: if (err < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not report failed GTM"))); + errmsg("XCM ERROR: could not report failed GTM"))); } /* @@ -860,7 +860,7 @@ PGXCMirror_CheckStatus(RemoteConnTypes conn_type, int pgxc_node_id, int mirror_i if (err < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get Node Status"))); + errmsg("XCM ERROR: could not get Node Status"))); return status; } @@ -1065,7 +1065,7 @@ PGXCMirror_GetConnPoint(RemoteConnTypes conn_type, if (err < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Fault Sync ERROR: could not get Node Host data"))); + errmsg("XCM ERROR: could not get Node Host data"))); return conn_pts; } ----------------------------------------------------------------------- Summary of changes: src/backend/pgxc/pool/mirror.c | 28 ++++++++++++++-------------- 1 files changed, 14 insertions(+), 14 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-01 07:51:07
|
Project "Postgres-XC". The branch, ha_support has been updated via 0941c9d8ff92110c176f8c2a01011b473c2b7951 (commit) via 21639943c3c4402d029261e143b27733e39c7c07 (commit) from 180e27c030b192a782dc5312420b38caf83abcb8 (commit) - Log ----------------------------------------------------------------- commit 0941c9d8ff92110c176f8c2a01011b473c2b7951 Author: Michael P <mic...@us...> Date: Tue Mar 1 16:53:42 2011 +0900 Fix for XCM used without mirror mode In Normal mode, the static array holding mirror count for each datanode is not set, causing problems a crash on pooler side when XCM is used to build the host and port arrays for Datanodes. diff --git a/src/backend/pgxc/pool/mirror.c b/src/backend/pgxc/pool/mirror.c index fd51b4d..b7bfbc5 100644 --- a/src/backend/pgxc/pool/mirror.c +++ b/src/backend/pgxc/pool/mirror.c @@ -534,12 +534,17 @@ PGXCMirror_GetHostTotalString(RemoteConnTypes conn_type) } else if (conn_type == REMOTE_CONN_DATANODE) { + /* + * If mirror mode is not active, it means that there is only + * 1 single mirror for a datanode, which is the normal mode... + */ + int num_mirrors = IsPGXCMirrorMode ? PGXCNodeMirrorCount[i] : 1; int count; /* Build Datanode Host string */ - Assert(PGXCNodeMirrorCount[i] > 0); + Assert(num_mirrors > 0); - for (count = 0; count < PGXCNodeMirrorCount[i]; count++) + for (count = 0; count < num_mirrors; count++) { int local_len; char *buf; @@ -614,12 +619,17 @@ PGXCMirror_GetPortTotalString(RemoteConnTypes conn_type) } else if (conn_type == REMOTE_CONN_DATANODE) { + /* + * If mirror mode is not active, it means that there is only + * 1 single mirror for a datanode, which is the normal mode... + */ + int num_mirrors = IsPGXCMirrorMode ? PGXCNodeMirrorCount[i] : 1; int count; /* Build Datanode Host string */ - Assert(PGXCNodeMirrorCount[i] > 0); + Assert(num_mirrors > 0); - for (count = 0; count < PGXCNodeMirrorCount[i]; count++) + for (count = 0; count < num_mirrors; count++) { int local_len; char *buf; commit 21639943c3c4402d029261e143b27733e39c7c07 Author: Michael P <mic...@us...> Date: Tue Mar 1 16:43:20 2011 +0900 Fix to avoid Datanodes to unregister if they are not primary diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 71da4c2..984b3be 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -2215,7 +2215,7 @@ pmdie(SIGNAL_ARGS) /* Unregister Node on GTM */ if (IS_PGXC_COORDINATOR) UnregisterGTM(PGXC_NODE_COORDINATOR); - else if (IS_PGXC_DATANODE) + else if (IS_PGXC_DATANODE && IsPrimaryMirror) UnregisterGTM(PGXC_NODE_DATANODE); #endif pmState = PM_WAIT_BACKUP; @@ -2273,7 +2273,7 @@ pmdie(SIGNAL_ARGS) /* Unregister Node on GTM */ if (IS_PGXC_COORDINATOR) UnregisterGTM(PGXC_NODE_COORDINATOR); - else if (IS_PGXC_DATANODE) + else if (IS_PGXC_DATANODE && IsPrimaryMirror) UnregisterGTM(PGXC_NODE_DATANODE); #endif pmState = PM_WAIT_BACKENDS; ----------------------------------------------------------------------- Summary of changes: src/backend/pgxc/pool/mirror.c | 18 ++++++++++++++---- src/backend/postmaster/postmaster.c | 4 ++-- 2 files changed, 16 insertions(+), 6 deletions(-) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-03-01 01:44:56
|
Project "Postgres-XC". The branch, ha_support has been updated via 180e27c030b192a782dc5312420b38caf83abcb8 (commit) via 162d967054136baf6bc9cc35f286c771197af71b (commit) via 51cabe99e820989af4634ef7113a294563e8820c (commit) from bd357d3c92dcd3d7bb82ba63d942ba3841cff751 (commit) - Log ----------------------------------------------------------------- commit 180e27c030b192a782dc5312420b38caf83abcb8 Merge: 162d967 51cabe9 Author: Michael P <mic...@us...> Date: Tue Mar 1 10:49:45 2011 +0900 Merge branch 'master' into ha_support Conflicts: src/backend/pgxc/pool/execRemote.c diff --cc src/backend/pgxc/pool/execRemote.c index 4570bdf,dc1d68c..809d08f --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@@ -4607,93 -4572,54 +4607,146 @@@ PGXCNodeGetNodeList(PGXC_NodeId **datan } /* + * Build 2PC Data. + * This is made by a remote Coordinator that is preparing in PrepareTransaction() + * or in CommitTransaction for a Coordinator not preparing. + * + * Data has to be built before sending it to backend nodes. + */ +void +PGXCNode_Build2PCData(bool isimplicit) +{ + PGXCNodeAllHandles *pgxc_handles = pgxc_get_all_transaction_nodes(false); + int co_conn_count = pgxc_handles->co_conn_count; + int dn_conn_count = pgxc_handles->dn_conn_count; + PGXC_NodeId *datanodes = NULL; + int i; + + PGXC_2PCData = (Remote2PCData *) malloc(sizeof(Remote2PCData)); + + PGXC_2PCData->isddl = co_conn_count > 0; + PGXC_2PCData->isimplicit = isimplicit; + PGXC_2PCData->coordnum = PGXCNodeId; + PGXC_2PCData->nodelist = NULL; + + /* + * Build the node list string. + * Format is guc based nodenum1,nodenum2,...,nodenumN + */ + if (dn_conn_count != 0) + { + char buffer[NODELISTSIZE]; + + datanodes = collect_pgxcnode_numbers(dn_conn_count, + pgxc_handles->datanode_handles, REMOTE_CONN_DATANODE); + + sprintf(buffer, "%d", datanodes[0]); + + for (i = 1; i < dn_conn_count; i++) + { + sprintf(buffer, "%s,%d", buffer, datanodes[i]); + } + + PGXC_2PCData->nodelist = (char *) malloc(strlen(buffer) + 1); + memcpy(PGXC_2PCData->nodelist, buffer, strlen(buffer) + 1); + } + else + { + /* This case corresponds to Sequence DDL where only Coordinators are prepared */ + PGXC_2PCData->nodelist = (char *) malloc(2); + sprintf(PGXC_2PCData->nodelist,"n"); + PGXC_2PCData->nodelist[1] = '\0'; + } +} + +/* + * Set 2PC Data received from remote Coordinator. + * This can just be called by a backend node. + */ +void +PGXCNode_Set2PCData(bool isddl, bool isimplicit, int coordnum, const char *nodelist) +{ + PGXC_2PCData = (Remote2PCData *) malloc(sizeof(Remote2PCData)); + + /* Fill in 2PC Data received from other node */ + PGXC_2PCData->isddl = isddl; + PGXC_2PCData->isimplicit = isimplicit; + PGXC_2PCData->coordnum = coordnum; + + PGXC_2PCData->nodelist = (char *) malloc(strlen(nodelist) + 1); + memcpy(PGXC_2PCData->nodelist, nodelist, strlen(nodelist) + 1); +} + +void +PGXCNode_Unset2PCData(void) +{ + if (PGXC_2PCData) + { + if (PGXC_2PCData->nodelist) + free(PGXC_2PCData->nodelist); + free(PGXC_2PCData); + } +} + +/* + * Return 2PC Data necessary to mark the transaction as preparing + * This data is saved in the static list of prepared xacts when marked as preparing. + */ +Remote2PCData* +PGXCNode_Get2PCData(void) +{ + return PGXC_2PCData; +} ++ ++/* + * DataNodeCopyInBinaryForAll + * + * In a COPY TO, send to all datanodes PG_HEADER for a COPY TO in binary mode. 
+ */ + int DataNodeCopyInBinaryForAll(char *msg_buf, int len, PGXCNodeHandle** copy_connections) + { + int i; + int conn_count = 0; + PGXCNodeHandle *connections[NumDataNodes]; + int msgLen = 4 + len + 1; + int nLen = htonl(msgLen); + + for (i = 0; i < NumDataNodes; i++) + { + PGXCNodeHandle *handle = copy_connections[i]; + + if (!handle) + continue; + + connections[conn_count++] = handle; + } + + for (i = 0; i < conn_count; i++) + { + PGXCNodeHandle *handle = connections[i]; + if (handle->state == DN_CONNECTION_STATE_COPY_IN) + { + /* msgType + msgLen */ + if (ensure_out_buffer_capacity(handle->outEnd + 1 + msgLen, handle) != 0) + { + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); + } + + handle->outBuffer[handle->outEnd++] = 'd'; + memcpy(handle->outBuffer + handle->outEnd, &nLen, 4); + handle->outEnd += 4; + memcpy(handle->outBuffer + handle->outEnd, msg_buf, len); + handle->outEnd += len; + handle->outBuffer[handle->outEnd++] = '\n'; + } + else + { + add_error_message(handle, "Invalid data node connection"); + return EOF; + } + } + + return 0; + } commit 162d967054136baf6bc9cc35f286c771197af71b Author: Michael P <mic...@us...> Date: Tue Mar 1 10:41:40 2011 +0900 Correction for EXECUTE DIRECT For an EXECUTE DIRECT on a backend Coordinator, the node list was not set correctly, causing XCM to return an error due to an incorrect node number. There is also a fix for utility statements on backend mirrors so as to launch the utility on all the mirrors when not choosing a node number. diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 9cd45a1..ca715e0 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -2115,12 +2115,14 @@ transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt) (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Cannot use Mirror ID format for Coordinator"))); - if (node->data_node_id > NumDataNodes) + if (node->data_node_id > NumDataNodes || + node->data_node_id < 1) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Node Number %d is incorrect", node->data_node_id))); - if (node->mirror_id > PGXCMirror_GetMirrorCount(node->data_node_id)) + if (node->mirror_id > PGXCMirror_GetMirrorCount(node->data_node_id) || + node->mirror_id < 1) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Mirror Number %d is incorrect", node->mirror_id))); @@ -2144,6 +2146,7 @@ transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt) else { int nodenum = intVal(lfirst(nodeitem)); + if (nodenum < 1 || (!is_coordinator && nodenum > total_num_nodes) || (is_coordinator && nodenum > total_num_nodes)) @@ -2266,14 +2269,17 @@ transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt) * for a SELECT command pick up only one node in the subset * for a UTILITY command pick up the whole subset * If node number is mirror format dn_id/mirror_id, keep it as it is. + * For a Coordinator, just use the node number. 
*/ - if (step->exec_direct_type == EXEC_DIRECT_LOCAL_UTILITY && + if (step->exec_direct_type == EXEC_DIRECT_UTILITY && IsPGXCMirrorMode && - !nodenum_defined) + !nodenum_defined && + !is_coordinator) step->exec_nodes->nodelist = PGXCMirror_GetSubsetMirrors(nodenum, true); else if (step->exec_direct_type == EXEC_DIRECT_SELECT && IsPGXCMirrorMode && - !nodenum_defined) + !nodenum_defined && + !is_coordinator) step->exec_nodes->nodelist = PGXCMirror_GetSubsetMirrors(nodenum, false); else if (nodenum_defined) /* Node Number where to run has already been calculated */ step->exec_nodes->nodelist = lappend_int(step->exec_nodes->nodelist, nodenum_real); ----------------------------------------------------------------------- Summary of changes: src/backend/commands/copy.c | 90 ++++++++++++++++++++++++++++++++++++ src/backend/parser/analyze.c | 16 ++++-- src/backend/pgxc/pool/execRemote.c | 53 +++++++++++++++++++++ src/include/pgxc/execRemote.h | 1 + 4 files changed, 155 insertions(+), 5 deletions(-) hooks/post-receive -- Postgres-XC |
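Part of the EXECUTE DIRECT correction is stricter range checking of the target node and mirror numbers. A stand-alone sketch of those checks, with report_error and the count lookups standing in for ereport() and the real functions, could be:

#include <stdbool.h>

extern int  num_datanodes;
extern int  mirror_count_for(int datanode_id);
extern void report_error(const char *fmt, int value);   /* stand-in for ereport(ERROR, ...) */

static void
check_exec_direct_target(int datanode_id, int mirror_id)
{
    /* both numbers are 1-based and must fall inside the configured counts */
    if (datanode_id < 1 || datanode_id > num_datanodes)
        report_error("Node Number %d is incorrect", datanode_id);

    if (mirror_id < 1 || mirror_id > mirror_count_for(datanode_id))
        report_error("Mirror Number %d is incorrect", mirror_id);
}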
From: Michael P. <mic...@us...> - 2011-02-28 01:32:23
|
Project "Postgres-XC". The branch, master has been updated via 51cabe99e820989af4634ef7113a294563e8820c (commit) from b260079109da14ed446f18eb008fe6b873a322f0 (commit) - Log ----------------------------------------------------------------- commit 51cabe99e820989af4634ef7113a294563e8820c Author: Michael P <mic...@us...> Date: Mon Feb 28 10:33:45 2011 +0900 Fix for bug 3151626: Support for COPY BINARY This implements support for BINARY format for both COPY TO and COPY FROM. For COPY TO, PG_HEADER part is only generated by Coordinator, and Datanodes only generate the data in itself. Datanodes were generating the whole data, including header and send it back to client directly. Patch has been written by xabc1000, with some editorialization by me. diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 2d0fb13..a6cb877 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -609,6 +609,13 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread) break; } } +#ifdef PGXC + /* A PGXC Datanode does not need to read the header data received from Coordinator */ + if (IS_PGXC_DATANODE && + cstate->binary && + cstate->fe_msgbuf->data[cstate->fe_msgbuf->len-1] == '\n') + cstate->fe_msgbuf->len--; +#endif avail = cstate->fe_msgbuf->len - cstate->fe_msgbuf->cursor; if (avail > maxread) avail = maxread; @@ -1551,6 +1558,10 @@ CopyTo(CopyState cstate) if (cstate->binary) { +#ifdef PGXC + if (IS_PGXC_COORDINATOR) + { +#endif /* Generate header for a binary copy */ int32 tmp; @@ -1564,6 +1575,12 @@ CopyTo(CopyState cstate) /* No header extension */ tmp = 0; CopySendInt32(cstate, tmp); + +#ifdef PGXC + /* Need to flush out the trailer */ + CopySendEndOfRow(cstate); + } +#endif } else { @@ -1646,7 +1663,15 @@ CopyTo(CopyState cstate) } #endif +#ifdef PGXC + /* + * In PGXC, it is not necessary for a datanode to generate + * the trailer as Coordinator is in charge of it + */ + if (cstate->binary && IS_PGXC_COORDINATOR) +#else if (cstate->binary) +#endif { /* Generate trailer for a binary copy */ CopySendInt16(cstate, -1); @@ -2099,6 +2124,31 @@ CopyFrom(CopyState cstate) (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("invalid COPY file header (wrong length)"))); } +#ifdef PGXC + if (IS_PGXC_COORDINATOR) + { + /* Empty buffer info and send header to all the backends involved in COPY */ + resetStringInfo(&cstate->line_buf); + + enlargeStringInfo(&cstate->line_buf, 19); + appendBinaryStringInfo(&cstate->line_buf, BinarySignature, 11); + tmp = 0; + + if (cstate->oids) + tmp |= (1 << 16); + tmp = htonl(tmp); + + appendBinaryStringInfo(&cstate->line_buf, &tmp, 4); + tmp = 0; + tmp = htonl(tmp); + appendBinaryStringInfo(&cstate->line_buf, &tmp, 4); + + if(DataNodeCopyInBinaryForAll(cstate->line_buf.data, 19, cstate->connections)) + ereport(ERROR, + (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), + errmsg("invalid COPY file header (COPY SEND)"))); + } +#endif } if (file_has_oids && cstate->binary) @@ -2254,6 +2304,18 @@ CopyFrom(CopyState cstate) fld_count == -1) { done = true; +#ifdef PGXC + if (IS_PGXC_COORDINATOR) + { + /* Empty buffer */ + resetStringInfo(&cstate->line_buf); + + enlargeStringInfo(&cstate->line_buf, sizeof(uint16)); + /* Receive field count directly from datanodes */ + fld_count = htons(fld_count); + appendBinaryStringInfo(&cstate->line_buf, &fld_count, sizeof(uint16)); + } +#endif break; } @@ -2262,7 +2324,17 @@ CopyFrom(CopyState cstate) (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("row field count is %d, expected %d", (int) fld_count, 
attr_count))); +#ifdef PGXC + if (IS_PGXC_COORDINATOR) + { + /* Empty buffer */ + resetStringInfo(&cstate->line_buf); + enlargeStringInfo(&cstate->line_buf, sizeof(uint16)); + fld_count = htons(fld_count); + appendBinaryStringInfo(&cstate->line_buf, &fld_count, sizeof(uint16)); + } +#endif if (file_has_oids) { cstate->cur_attname = "oid"; @@ -3302,6 +3374,7 @@ CopyReadBinaryAttribute(CopyState cstate, bool *isnull) { int32 fld_size; + int32 nSize; Datum result; if (!CopyGetInt32(cstate, &fld_size)) @@ -3317,6 +3390,15 @@ CopyReadBinaryAttribute(CopyState cstate, ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("invalid field size"))); +#ifdef PGXC + if (IS_PGXC_COORDINATOR) + { + /* Get the field size from Datanode */ + enlargeStringInfo(&cstate->line_buf, sizeof(int32)); + nSize = htonl(fld_size); + appendBinaryStringInfo(&cstate->line_buf, &nSize, sizeof(int32)); + } +#endif /* reset attribute_buf to empty, and load raw data in it */ resetStringInfo(&cstate->attribute_buf); @@ -3330,6 +3412,14 @@ CopyReadBinaryAttribute(CopyState cstate, cstate->attribute_buf.len = fld_size; cstate->attribute_buf.data[fld_size] = '\0'; +#ifdef PGXC + if (IS_PGXC_COORDINATOR) + { + /* Get binary message from Datanode */ + enlargeStringInfo(&cstate->line_buf, fld_size); + appendBinaryStringInfo(&cstate->line_buf, cstate->attribute_buf.data, fld_size); + } +#endif /* Call the column type's binary input converter */ result = ReceiveFunctionCall(flinfo, &cstate->attribute_buf, diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c index 1d41a5c..dc1d68c 100644 --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@ -4570,3 +4570,56 @@ PGXCNodeGetNodeList(PGXC_NodeId **datanodes, if (!PersistentConnections) release_handles(); } + +/* + * DataNodeCopyInBinaryForAll + * + * In a COPY TO, send to all datanodes PG_HEADER for a COPY TO in binary mode. 
+ */ +int DataNodeCopyInBinaryForAll(char *msg_buf, int len, PGXCNodeHandle** copy_connections) +{ + int i; + int conn_count = 0; + PGXCNodeHandle *connections[NumDataNodes]; + int msgLen = 4 + len + 1; + int nLen = htonl(msgLen); + + for (i = 0; i < NumDataNodes; i++) + { + PGXCNodeHandle *handle = copy_connections[i]; + + if (!handle) + continue; + + connections[conn_count++] = handle; + } + + for (i = 0; i < conn_count; i++) + { + PGXCNodeHandle *handle = connections[i]; + if (handle->state == DN_CONNECTION_STATE_COPY_IN) + { + /* msgType + msgLen */ + if (ensure_out_buffer_capacity(handle->outEnd + 1 + msgLen, handle) != 0) + { + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); + } + + handle->outBuffer[handle->outEnd++] = 'd'; + memcpy(handle->outBuffer + handle->outEnd, &nLen, 4); + handle->outEnd += 4; + memcpy(handle->outBuffer + handle->outEnd, msg_buf, len); + handle->outEnd += len; + handle->outBuffer[handle->outEnd++] = '\n'; + } + else + { + add_error_message(handle, "Invalid data node connection"); + return EOF; + } + } + + return 0; +} diff --git a/src/include/pgxc/execRemote.h b/src/include/pgxc/execRemote.h index aaa12a1..23ea4a8 100644 --- a/src/include/pgxc/execRemote.h +++ b/src/include/pgxc/execRemote.h @@ -139,6 +139,7 @@ extern PGXCNodeHandle** DataNodeCopyBegin(const char *query, List *nodelist, Sna extern int DataNodeCopyIn(char *data_row, int len, ExecNodes *exec_nodes, PGXCNodeHandle** copy_connections); extern uint64 DataNodeCopyOut(ExecNodes *exec_nodes, PGXCNodeHandle** copy_connections, FILE* copy_file); extern void DataNodeCopyFinish(PGXCNodeHandle** copy_connections, int primary_data_node, CombineType combine_type); +extern int DataNodeCopyInBinaryForAll(char *msg_buf, int len, PGXCNodeHandle** copy_connections); extern int ExecCountSlotsRemoteQuery(RemoteQuery *node); extern RemoteQueryState *ExecInitRemoteQuery(RemoteQuery *node, EState *estate, int eflags); ----------------------------------------------------------------------- Summary of changes: src/backend/commands/copy.c | 90 ++++++++++++++++++++++++++++++++++++ src/backend/pgxc/pool/execRemote.c | 53 +++++++++++++++++++++ src/include/pgxc/execRemote.h | 1 + 3 files changed, 144 insertions(+), 0 deletions(-) hooks/post-receive -- Postgres-XC |
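With this change only the Coordinator emits the COPY BINARY header and trailer, while Datanodes send bare rows. For reference, the header follows the documented PGCOPY layout: an 11-byte signature, a 32-bit flags word (bit 16 set when OIDs are included) and a 32-bit header-extension length. A minimal sketch of emitting it, with append_bytes as a hypothetical output sink, is:

#include <arpa/inet.h>   /* htonl */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

extern void append_bytes(const void *data, size_t len);   /* hypothetical sink */

/* "PGCOPY\n\377\r\n\0": the fixed 11-byte binary-copy signature */
static const char binary_signature[11] = "PGCOPY\n\377\r\n\0";

static void
send_copy_binary_header(bool with_oids)
{
    uint32_t flags = 0;
    uint32_t ext_len = 0;

    if (with_oids)
        flags |= 1u << 16;

    append_bytes(binary_signature, sizeof(binary_signature));

    flags = htonl(flags);                    /* flags field, network byte order */
    append_bytes(&flags, sizeof(flags));

    ext_len = htonl(ext_len);                /* no header extension */
    append_bytes(&ext_len, sizeof(ext_len));
}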