<programlisting>
CREATE TABLE cities (
- name varchar(80) primary key,
+ name varchar(80) PRIMARY KEY,
location point
);
CREATE TABLE weather (
- city varchar(80) references cities(name),
+ city varchar(80) REFERENCES cities (name),
temp_lo int,
temp_hi int,
prcp real,
join sequence. The planner preferentially considers joins between any
two relations for which there exists a corresponding join clause in the
<literal>WHERE</literal> qualification (i.e., for
- which a restriction like <literal>where rel1.attr1=rel2.attr2</literal>
+ which a restriction like <literal>WHERE rel1.attr1 = rel2.attr2</literal>
exists). Join pairs with no join clause are considered only when there
is no other choice, that is, a particular relation has no available
join clauses to any other relation. All possible plans are generated for
<programlisting>
=# CREATE TABLE tbloom AS
SELECT
- (random() * 1000000)::int as i1,
- (random() * 1000000)::int as i2,
- (random() * 1000000)::int as i3,
- (random() * 1000000)::int as i4,
- (random() * 1000000)::int as i5,
- (random() * 1000000)::int as i6
+ (random() * 1000000)::int AS i1,
+ (random() * 1000000)::int AS i2,
+ (random() * 1000000)::int AS i3,
+ (random() * 1000000)::int AS i4,
+ (random() * 1000000)::int AS i5,
+ (random() * 1000000)::int AS i6
FROM
generate_series(1,10000000);
SELECT 10000000
<screen>
CREATE COLLATION mycollation5 (provider = icu, deterministic = false, locale = 'en-US-u-kn-ks-level2');
-SELECT 'aB' = 'Ab' COLLATE mycollation5 as result;
+SELECT 'aB' = 'Ab' COLLATE mycollation5 AS result;
result
--------
t
(1 row)
-SELECT 'N-45' < 'N-123' COLLATE mycollation5 as result;
+SELECT 'N-45' < 'N-123' COLLATE mycollation5 AS result;
result
--------
t
<programlisting>
CREATE TABLE parent(key integer, ...);
-CREATE TABLE child1000(check (key between 1000 and 1999)) INHERITS(parent);
-CREATE TABLE child2000(check (key between 2000 and 2999)) INHERITS(parent);
+CREATE TABLE child1000(CHECK (key BETWEEN 1000 AND 1999)) INHERITS(parent);
+CREATE TABLE child2000(CHECK (key BETWEEN 2000 AND 2999)) INHERITS(parent);
...
SELECT * FROM parent WHERE key = 2400;
</programlisting>
For example, the nearest neighbor of the 3-D point (0.5, 0.5, 0.5)
could be found efficiently with:
<programlisting>
-SELECT c FROM test ORDER BY c <-> cube(array[0.5,0.5,0.5]) LIMIT 1;
+SELECT c FROM test ORDER BY c <-> cube(ARRAY[0.5, 0.5, 0.5]) LIMIT 1;
</programlisting>
</para>
This union:
</para>
<programlisting>
-select cube_union('(0,5,2),(2,3,1)', '0');
+SELECT cube_union('(0,5,2),(2,3,1)', '0');
cube_union
-------------------
(0, 0, 0),(2, 5, 2)
</para>
<programlisting>
-select cube_inter('(0,-1),(1,1)', '(-2),(2)');
+SELECT cube_inter('(0,-1),(1,1)', '(-2),(2)');
cube_inter
-------------
(0, 0),(1, 0)
</para>
<programlisting>
-select cube_contains('(0,0),(1,1)', '0.5,0.5');
+SELECT cube_contains('(0,0),(1,1)', '0.5,0.5');
cube_contains
--------------
t
SELECT x,
round(x::numeric) AS num_round,
round(x::double precision) AS dbl_round
-FROM generate_series(-3.5, 3.5, 1) as x;
+FROM generate_series(-3.5, 3.5, 1) AS x;
x | num_round | dbl_round
------+-----------+-----------
-3.5 | -4 | -4
semantically insignificant and disregarded when comparing two values
of type <type>character</type>. In collations where whitespace
is significant, this behavior can produce unexpected results;
- for example <command>SELECT 'a '::CHAR(2) collate "C" <
+ for example <command>SELECT 'a '::CHAR(2) COLLATE "C" <
E'a\n'::CHAR(2)</command> returns true, even though <literal>C</literal>
locale would consider a space to be greater than a newline.
Trailing spaces are removed when converting a <type>character</type> value
definition when you need it: do the arithmetic in time
zone <literal>UTC+12</literal>. For example,
<programlisting>
-=> SELECT extract(julian from '2021-06-23 7:00:00-04'::timestamptz at time zone 'UTC+12');
+=> SELECT extract(julian FROM '2021-06-23 7:00:00-04'::timestamptz AT TIME ZONE 'UTC+12');
extract
------------------------------
2459388.95833333333333333333
(1 row)
-=> SELECT extract(julian from '2021-06-23 8:00:00-04'::timestamptz at time zone 'UTC+12');
+=> SELECT extract(julian FROM '2021-06-23 8:00:00-04'::timestamptz AT TIME ZONE 'UTC+12');
extract
--------------------------------------
2459389.0000000000000000000000000000
(1 row)
-=> SELECT extract(julian from date '2021-06-23');
+=> SELECT extract(julian FROM date '2021-06-23');
extract
---------
2459389
<listitem>
<para>
The SQL query that you wish to execute in the remote database,
- for example <literal>select * from foo</literal>.
+ for example <literal>SELECT * FROM foo</literal>.
</para>
</listitem>
</varlistentry>
<programlisting>
SELECT *
FROM dblink('dbname=mydb options=-csearch_path=',
- 'select proname, prosrc from pg_proc')
+ 'SELECT proname, prosrc FROM pg_proc')
AS t1(proname name, prosrc text)
WHERE proname LIKE 'bytea%';
</programlisting>
CREATE VIEW myremote_pg_proc AS
SELECT *
FROM dblink('dbname=postgres options=-csearch_path=',
- 'select proname, prosrc from pg_proc')
+ 'SELECT proname, prosrc FROM pg_proc')
AS t1(proname name, prosrc text);
SELECT * FROM myremote_pg_proc WHERE proname LIKE 'bytea%';
<screen>
SELECT * FROM dblink('dbname=postgres options=-csearch_path=',
- 'select proname, prosrc from pg_proc')
+ 'SELECT proname, prosrc FROM pg_proc')
AS t1(proname name, prosrc text) WHERE proname LIKE 'bytea%';
proname | prosrc
------------+------------
OK
(1 row)
-SELECT * FROM dblink('select proname, prosrc from pg_proc')
+SELECT * FROM dblink('SELECT proname, prosrc FROM pg_proc')
AS t1(proname name, prosrc text) WHERE proname LIKE 'bytea%';
proname | prosrc
------------+------------
OK
(1 row)
-SELECT * FROM dblink('myconn', 'select proname, prosrc from pg_proc')
+SELECT * FROM dblink('myconn', 'SELECT proname, prosrc FROM pg_proc')
AS t1(proname name, prosrc text) WHERE proname LIKE 'bytea%';
proname | prosrc
------------+------------
<para>
The SQL command that you wish to execute in the remote database,
for example
- <literal>insert into foo values(0, 'a', '{"a0","b0","c0"}')</literal>.
+ <literal>INSERT INTO foo VALUES (0, 'a', '{"a0","b0","c0"}')</literal>.
</para>
</listitem>
</varlistentry>
<listitem>
<para>
The <command>SELECT</command> statement that you wish to execute in the remote
- database, for example <literal>select * from pg_class</literal>.
+ database, for example <literal>SELECT * FROM pg_class</literal>.
</para>
</listitem>
</varlistentry>
OK
(1 row)
-SELECT dblink_open('foo', 'select proname, prosrc from pg_proc');
+SELECT dblink_open('foo', 'SELECT proname, prosrc FROM pg_proc');
dblink_open
-------------
OK
OK
(1 row)
-SELECT dblink_open('foo', 'select proname, prosrc from pg_proc where proname like ''bytea%''');
+SELECT dblink_open('foo', 'SELECT proname, prosrc FROM pg_proc WHERE proname LIKE ''bytea%''');
dblink_open
-------------
OK
OK
(1 row)
-SELECT dblink_open('foo', 'select proname, prosrc from pg_proc');
+SELECT dblink_open('foo', 'SELECT proname, prosrc FROM pg_proc');
dblink_open
-------------
OK
<listitem>
<para>
The SQL statement that you wish to execute in the remote database,
- for example <literal>select * from pg_class</literal>.
+ for example <literal>SELECT * FROM pg_class</literal>.
</para>
</listitem>
</varlistentry>
(1 row)
contrib_regression=# SELECT * FROM
-contrib_regression-# dblink_send_query('dtest1', 'select * from foo where f1 < 3') AS t1;
+contrib_regression-# dblink_send_query('dtest1', 'SELECT * FROM foo WHERE f1 < 3') AS t1;
t1
----
1
(0 rows)
contrib_regression=# SELECT * FROM
-contrib_regression-# dblink_send_query('dtest1', 'select * from foo where f1 < 3; select * from foo where f1 > 6') AS t1;
+contrib_regression-# dblink_send_query('dtest1', 'SELECT * FROM foo WHERE f1 < 3; SELECT * FROM foo WHERE f1 > 6') AS t1;
t1
----
1
<programlisting>
-- admin can view all rows and fields
-postgres=> set role admin;
+postgres=> SET ROLE admin;
SET
-postgres=> table passwd;
+postgres=> TABLE passwd;
user_name | pwhash | uid | gid | real_name | home_phone | extra_info | home_dir | shell
-----------+--------+-----+-----+-----------+--------------+------------+-------------+-----------
admin | xxx | 0 | 0 | Admin | 111-222-3333 | | /root | /bin/dash
(3 rows)
-- Test what Alice is able to do
-postgres=> set role alice;
+postgres=> SET ROLE alice;
SET
-postgres=> table passwd;
+postgres=> TABLE passwd;
ERROR: permission denied for table passwd
-postgres=> select user_name,real_name,home_phone,extra_info,home_dir,shell from passwd;
+postgres=> SELECT user_name, real_name, home_phone, extra_info, home_dir, shell FROM passwd;
user_name | real_name | home_phone | extra_info | home_dir | shell
-----------+-----------+--------------+------------+-------------+-----------
admin | Admin | 111-222-3333 | | /root | /bin/dash
alice | Alice | 098-765-4321 | | /home/alice | /bin/zsh
(3 rows)
-postgres=> update passwd set user_name = 'joe';
+postgres=> UPDATE passwd SET user_name = 'joe';
ERROR: permission denied for table passwd
-- Alice is allowed to change her own real_name, but no others
-postgres=> update passwd set real_name = 'Alice Doe';
+postgres=> UPDATE passwd SET real_name = 'Alice Doe';
UPDATE 1
-postgres=> update passwd set real_name = 'John Doe' where user_name = 'admin';
+postgres=> UPDATE passwd SET real_name = 'John Doe' WHERE user_name = 'admin';
UPDATE 0
-postgres=> update passwd set shell = '/bin/xx';
+postgres=> UPDATE passwd SET shell = '/bin/xx';
ERROR: new row violates WITH CHECK OPTION for "passwd"
-postgres=> delete from passwd;
+postgres=> DELETE FROM passwd;
ERROR: permission denied for table passwd
-postgres=> insert into passwd (user_name) values ('xxx');
+postgres=> INSERT INTO passwd (user_name) VALUES ('xxx');
ERROR: permission denied for table passwd
-- Alice can change her own password; RLS silently prevents updating other rows
-postgres=> update passwd set pwhash = 'abc';
+postgres=> UPDATE passwd SET pwhash = 'abc';
UPDATE 1
</programlisting>
admin
(1 row)
-=> select inet_client_addr();
+=> SELECT inet_client_addr();
inet_client_addr
------------------
127.0.0.1
-----------+--------+-----+-----+-----------+------------+------------+----------+-------
(0 rows)
-=> UPDATE passwd set pwhash = NULL;
+=> UPDATE passwd SET pwhash = NULL;
UPDATE 0
</programlisting>
To test the dictionary, you can try
<programlisting>
-mydb# select ts_lexize('intdict', '12345678');
+mydb# SELECT ts_lexize('intdict', '12345678');
ts_lexize
-----------
{123456}
column to provide unique identifiers, <literal>RETURNING</literal> can return
the ID assigned to a new row:
<programlisting>
-CREATE TABLE users (firstname text, lastname text, id serial primary key);
+CREATE TABLE users (firstname text, lastname text, id serial PRIMARY KEY);
INSERT INTO users (firstname, lastname) VALUES ('Joe', 'Cool') RETURNING id;
</programlisting>
EXEC SQL BEGIN DECLARE SECTION;
char dbaname[128];
char datname[128];
-char *stmt = "SELECT u.usename as dbaname, d.datname "
+char *stmt = "SELECT u.usename AS dbaname, d.datname "
" FROM pg_database d, pg_user u "
" WHERE d.datdba = u.usesysid";
EXEC SQL END DECLARE SECTION;
EXEC SQL CONNECT TO :db AS :id;
EXEC SQL CONNECT TO :db USER connectuser USING :pw;
EXEC SQL CONNECT TO @localhost AS main USER connectdb;
-EXEC SQL CONNECT TO REGRESSDB1 as main;
+EXEC SQL CONNECT TO REGRESSDB1 AS main;
EXEC SQL CONNECT TO AS main USER connectdb;
EXEC SQL CONNECT TO connectdb AS :id;
EXEC SQL CONNECT TO connectdb AS main USER connectuser/connectdb;
DECLARE
table_oid oid := pg_event_trigger_table_rewrite_oid();
- current_hour integer := extract('hour' from current_time);
+ current_hour integer := extract('hour' FROM current_time);
pages integer;
max_pages integer := 100;
BEGIN
in a query's FROM clause; see <xref linkend="queries-tablefunctions"/>.
</para>
<para>
- <literal>select * from unnest(ARRAY[1,2], ARRAY['foo','bar','baz']) as x(a,b)</literal>
+ <literal>SELECT * FROM unnest(ARRAY[1, 2], ARRAY['foo', 'bar', 'baz']) AS x(a, b)</literal>
<returnvalue></returnvalue>
<programlisting>
a | b
of <parameter>newsubstring</parameter>.
</para>
<para>
- <literal>overlay('\x1234567890'::bytea placing '\002\003'::bytea from 2 for 3)</literal>
+ <literal>overlay('\x1234567890'::bytea PLACING '\002\003'::bytea FROM 2 FOR 3)</literal>
<returnvalue>\x12020390</returnvalue>
</para></entry>
</row>
<parameter>bytes</parameter>, or zero if it's not present.
</para>
<para>
- <literal>position('\x5678'::bytea in '\x1234567890'::bytea)</literal>
+ <literal>position('\x5678'::bytea IN '\x1234567890'::bytea)</literal>
<returnvalue>3</returnvalue>
</para></entry>
</row>
and <parameter>count</parameter>.
</para>
<para>
- <literal>substring('\x1234567890'::bytea from 3 for 2)</literal>
+ <literal>substring('\x1234567890'::bytea FROM 3 FOR 2)</literal>
<returnvalue>\x5678</returnvalue>
</para></entry>
</row>
significant byte first. Some examples:
<programlisting>
1234::smallint::bytea <lineannotation>\x04d2</lineannotation>
-cast(1234 as bytea) <lineannotation>\x000004d2</lineannotation>
-cast(-1234 as bytea) <lineannotation>\xfffffb2e</lineannotation>
+cast(1234 AS bytea) <lineannotation>\x000004d2</lineannotation>
+cast(-1234 AS bytea) <lineannotation>\xfffffb2e</lineannotation>
'\x8000'::bytea::smallint <lineannotation>-32768</lineannotation>
'\x8000'::bytea::integer <lineannotation>32768</lineannotation>
</programlisting>
of <parameter>newsubstring</parameter>.
</para>
<para>
- <literal>overlay(B'01010101010101010' placing B'11111' from 2 for 3)</literal>
+ <literal>overlay(B'01010101010101010' PLACING B'11111' FROM 2 FOR 3)</literal>
<returnvalue>0111110101010101010</returnvalue>
</para></entry>
</row>
within <parameter>bits</parameter>, or zero if it's not present.
</para>
<para>
- <literal>position(B'010' in B'000001101011')</literal>
+ <literal>position(B'010' IN B'000001101011')</literal>
<returnvalue>8</returnvalue>
</para></entry>
</row>
and <parameter>count</parameter>.
</para>
<para>
- <literal>substring(B'110010111111' from 3 for 2)</literal>
+ <literal>substring(B'110010111111' FROM 3 FOR 2)</literal>
<returnvalue>00</returnvalue>
</para></entry>
</row>
<programlisting>
44::bit(10) <lineannotation>0000101100</lineannotation>
44::bit(3) <lineannotation>100</lineannotation>
-cast(-44 as bit(12)) <lineannotation>111111010100</lineannotation>
+cast(-44 AS bit(12)) <lineannotation>111111010100</lineannotation>
'1110'::bit(4)::integer <lineannotation>14</lineannotation>
</programlisting>
Note that casting to just <quote>bit</quote> means casting to
<indexterm>
<primary>extract</primary>
</indexterm>
- <function>extract</function> ( <parameter>field</parameter> <literal>from</literal> <type>timestamp</type> )
+ <function>extract</function> ( <parameter>field</parameter> <literal>FROM</literal> <type>timestamp</type> )
<returnvalue>numeric</returnvalue>
</para>
<para>
Get timestamp subfield; see <xref linkend="functions-datetime-extract"/>
</para>
<para>
- <literal>extract(hour from timestamp '2001-02-16 20:38:40')</literal>
+ <literal>extract(hour FROM timestamp '2001-02-16 20:38:40')</literal>
<returnvalue>20</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
- <function>extract</function> ( <parameter>field</parameter> <literal>from</literal> <type>interval</type> )
+ <function>extract</function> ( <parameter>field</parameter> <literal>FROM</literal> <type>interval</type> )
<returnvalue>numeric</returnvalue>
</para>
<para>
Get interval subfield; see <xref linkend="functions-datetime-extract"/>
</para>
<para>
- <literal>extract(month from interval '2 years 3 months')</literal>
+ <literal>extract(month FROM interval '2 years 3 months')</literal>
<returnvalue>3</returnvalue>
</para></entry>
</row>
value is in the named time zone.
</para>
<para>
- <literal>timestamp '2001-02-16 20:38:40' at time zone 'America/Denver'</literal>
+ <literal>timestamp '2001-02-16 20:38:40' AT TIME ZONE 'America/Denver'</literal>
<returnvalue>2001-02-17 03:38:40+00</returnvalue>
</para></entry>
</row>
appear in that zone.
</para>
<para>
- <literal>timestamp with time zone '2001-02-16 20:38:40-05' at time zone 'America/Denver'</literal>
+ <literal>timestamp with time zone '2001-02-16 20:38:40-05' AT TIME ZONE 'America/Denver'</literal>
<returnvalue>2001-02-16 18:38:40</returnvalue>
</para></entry>
</row>
offset for the named destination zone.
</para>
<para>
- <literal>time with time zone '05:34:17-05' at time zone 'UTC'</literal>
+ <literal>time with time zone '05:34:17-05' AT TIME ZONE 'UTC'</literal>
<returnvalue>10:34:17+00</returnvalue>
</para></entry>
</row>
<listitem>
<para>
<function>to_char(..., 'ID')</function>'s day of the week numbering
- matches the <function>extract(isodow from ...)</function> function, but
+ matches the <function>extract(isodow FROM ...)</function> function, but
<function>to_char(..., 'D')</function>'s does not match
- <function>extract(dow from ...)</function>'s day numbering.
+ <function>extract(dow FROM ...)</function>'s day numbering.
</para>
</listitem>
collatable data type, then an error is raised.
</para>
<para>
- <literal>collation for ('foo'::text)</literal>
+ <literal>COLLATION FOR ('foo'::text)</literal>
<returnvalue>"default"</returnvalue>
</para>
<para>
- <literal>collation for ('foo' COLLATE "de_DE")</literal>
+ <literal>COLLATION FOR ('foo' COLLATE "de_DE")</literal>
<returnvalue>"de_DE"</returnvalue>
</para></entry>
</row>
Expands the top-level JSON array into a set of JSON values.
</para>
<para>
- <literal>select * from json_array_elements('[1,true, [2,false]]')</literal>
+ <literal>SELECT * FROM json_array_elements('[1,true, [2,false]]')</literal>
<returnvalue></returnvalue>
<programlisting>
value
Expands the top-level JSON array into a set of <type>text</type> values.
</para>
<para>
- <literal>select * from json_array_elements_text('["foo", "bar"]')</literal>
+ <literal>SELECT * FROM json_array_elements_text('["foo", "bar"]')</literal>
<returnvalue></returnvalue>
<programlisting>
value
Expands the top-level JSON object into a set of key/value pairs.
</para>
<para>
- <literal>select * from json_each('{"a":"foo", "b":"bar"}')</literal>
+ <literal>SELECT * FROM json_each('{"a":"foo", "b":"bar"}')</literal>
<returnvalue></returnvalue>
<programlisting>
key | value
type <type>text</type>.
</para>
<para>
- <literal>select * from json_each_text('{"a":"foo", "b":"bar"}')</literal>
+ <literal>SELECT * FROM json_each_text('{"a":"foo", "b":"bar"}')</literal>
<returnvalue></returnvalue>
<programlisting>
key | value
Returns the set of keys in the top-level JSON object.
</para>
<para>
- <literal>select * from json_object_keys('{"f1":"abc","f2":{"f3":"a", "f4":"b"}}')</literal>
+ <literal>SELECT * FROM json_object_keys('{"f1":"abc","f2":{"f3":"a", "f4":"b"}}')</literal>
<returnvalue></returnvalue>
<programlisting>
json_object_keys
calls.
</para>
<para>
- <literal>create type subrowtype as (d int, e text);</literal>
- <literal>create type myrowtype as (a int, b text[], c subrowtype);</literal>
+ <literal>CREATE TYPE subrowtype AS (d int, e text);</literal>
+ <literal>CREATE TYPE myrowtype AS (a int, b text[], c subrowtype);</literal>
</para>
<para>
- <literal>select * from json_populate_record(null::myrowtype,
+ <literal>SELECT * FROM json_populate_record(NULL::myrowtype,
'{"a": 1, "b": ["2", "a b"], "c": {"d": 4, "e": "a b c"}, "x": "foo"}')</literal>
<returnvalue></returnvalue>
<programlisting>
valid input, <literal>false</literal> otherwise.
</para>
<para>
- <literal>create type jsb_char2 as (a char(2));</literal>
+ <literal>CREATE TYPE jsb_char2 AS (a char(2));</literal>
</para>
<para>
- <literal>select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aaa"}');</literal>
+ <literal>SELECT jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aaa"}');</literal>
<returnvalue></returnvalue>
<programlisting>
jsonb_populate_record_valid
(1 row)
</programlisting>
- <literal>select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aaa"}') q;</literal>
+ <literal>SELECT * FROM jsonb_populate_record(NULL::jsb_char2, '{"a": "aaa"}') q;</literal>
<returnvalue></returnvalue>
<programlisting>
ERROR: value too long for type character(2)
</programlisting>
- <literal>select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aa"}');</literal>
+ <literal>SELECT jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aa"}');</literal>
<returnvalue></returnvalue>
<programlisting>
jsonb_populate_record_valid
(1 row)
</programlisting>
- <literal>select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aa"}') q;</literal>
+ <literal>SELECT * FROM jsonb_populate_record(NULL::jsb_char2, '{"a": "aa"}') q;</literal>
<returnvalue></returnvalue>
<programlisting>
a
for <function>json[b]_populate_record</function>.
</para>
<para>
- <literal>create type twoints as (a int, b int);</literal>
+ <literal>CREATE TYPE twoints AS (a int, b int);</literal>
</para>
<para>
- <literal>select * from json_populate_recordset(null::twoints, '[{"a":1,"b":2}, {"a":3,"b":4}]')</literal>
+ <literal>SELECT * FROM json_populate_recordset(NULL::twoints, '[{"a":1,"b":2}, {"a":3,"b":4}]')</literal>
<returnvalue></returnvalue>
<programlisting>
a | b
input record value, unmatched columns are always filled with nulls.
</para>
<para>
- <literal>create type myrowtype as (a int, b text);</literal>
+ <literal>CREATE TYPE myrowtype AS (a int, b text);</literal>
</para>
<para>
- <literal>select * from json_to_record('{"a":1,"b":[1,2,3],"c":[1,2,3],"e":"bar","r": {"a": 123, "b": "a b c"}}') as x(a int, b text, c int[], d text, r myrowtype)</literal>
+ <literal>SELECT * FROM json_to_record('{"a":1,"b":[1,2,3],"c":[1,2,3],"e":"bar","r": {"a": 123, "b": "a b c"}}') AS x(a int, b text, c int[], d text, r myrowtype)</literal>
<returnvalue></returnvalue>
<programlisting>
a | b | c | d | r
for <function>json[b]_populate_record</function>.
</para>
<para>
- <literal>select * from json_to_recordset('[{"a":1,"b":"foo"}, {"a":"2","c":"bar"}]') as x(a int, b text)</literal>
+ <literal>SELECT * FROM json_to_recordset('[{"a":1,"b":"foo"}, {"a":"2","c":"bar"}]') AS x(a int, b text)</literal>
<returnvalue></returnvalue>
<programlisting>
a | b
for <function>jsonb_path_exists</function>.
</para>
<para>
- <literal>select * from jsonb_path_query('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}')</literal>
+ <literal>SELECT * FROM jsonb_path_query('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}')</literal>
<returnvalue></returnvalue>
<programlisting>
jsonb_path_query
<literal>.<replaceable>key</replaceable></literal> accessor
operator to descend through surrounding JSON objects, for example:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments');</userinput>
jsonb_path_query
-----------------------------------------------------------&zwsp;-----------------------------------------------------------&zwsp;---------------------------------------------
[{"HR": 73, "location": [47.763, 13.4034], "start time": "2018-10-14 10:05:14"}, {"HR": 135, "location": [47.706, 13.2635], "start time": "2018-10-14 10:39:21"}]
The following example will return the location coordinates for all
the available track segments:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments[*].location');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments[*].location');</userinput>
jsonb_path_query
-------------------
[47.763, 13.4034]
specify the corresponding subscript in the <literal>[]</literal>
accessor operator. Recall that JSON array indexes are 0-relative:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments[0].location');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments[0].location');</userinput>
jsonb_path_query
-------------------
[47.763, 13.4034]
Each method name must be preceded by a dot. For example,
you can get the size of an array:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments.size()');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments.size()');</userinput>
jsonb_path_query
------------------
2
produce <literal>true</literal>, <literal>false</literal>,
or <literal>unknown</literal>. The <literal>unknown</literal> value
plays the same role as SQL <literal>NULL</literal> and can be tested
- for with the <literal>is unknown</literal> predicate. Further path
+ for with the <literal>IS UNKNOWN</literal> predicate. Further path
evaluation steps use only those items for which the filter expression
returned <literal>true</literal>.
</para>
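<para>
For instance, a minimal sketch (using a standalone array rather than the
surrounding example data): comparing a string to a number yields
<literal>unknown</literal>, so only that item passes the filter:
<screen>
<prompt>=></prompt> <userinput>SELECT jsonb_path_query('[1, "a", 2]', '$[*] ? ((@ > 1) is unknown)');</userinput>
 jsonb_path_query
------------------
 "a"
</screen>
</para>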
For example, suppose you would like to retrieve all heart rate values higher
than 130. You can achieve this as follows:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments[*].HR ? (@ > 130)');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments[*].HR ? (@ > 130)');</userinput>
jsonb_path_query
------------------
135
filter expression is applied to the previous step, and the path used
in the condition is different:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments[*] ? (@.HR > 130)."start time"');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments[*] ? (@.HR > 130)."start time"');</userinput>
jsonb_path_query
-----------------------
"2018-10-14 10:39:21"
The following example selects start times of all segments that
contain locations with relevant coordinates and high heart rate values:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4) ? (@.HR > 130)."start time"');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4) ? (@.HR > 130)."start time"');</userinput>
jsonb_path_query
-----------------------
"2018-10-14 10:39:21"
The following example first filters all segments by location, and then
returns high heart rate values for these segments, if available:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4).HR ? (@ > 130)');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4).HR ? (@ > 130)');</userinput>
jsonb_path_query
------------------
135
This example returns the size of the track if it contains any
segments with high heart rate values, or an empty sequence otherwise:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track ? (exists(@.segments[*] ? (@.HR > 130))).segments.size()');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track ? (exists(@.segments[*] ? (@.HR > 130))).segments.size()');</userinput>
jsonb_path_query
------------------
2
<literal>false</literal>, or <literal>null</literal>.
For example, we could write this SQL-standard filter expression:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments ?(@[*].HR > 130)');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments ?(@[*].HR > 130)');</userinput>
jsonb_path_query
-----------------------------------------------------------&zwsp;----------------------
{"HR": 135, "location": [47.706, 13.2635], "start time": "2018-10-14 10:39:21"}
The similar predicate check expression simply
returns <literal>true</literal>, indicating that a match exists:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', '$.track.segments[*].HR > 130');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', '$.track.segments[*].HR > 130');</userinput>
jsonb_path_query
------------------
true
abstract from the fact that it stores an array of segments
when using lax mode:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'lax $.track.segments.location');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'lax $.track.segments.location');</userinput>
jsonb_path_query
-------------------
[47.763, 13.4034]
the queried JSON document, so using this path
expression will cause an error:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'strict $.track.segments.location');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'strict $.track.segments.location');</userinput>
ERROR: jsonpath member accessor can only be applied to an object
</screen>
To get the same result as in lax mode, you have to explicitly unwrap the
<literal>segments</literal> array:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'strict $.track.segments[*].location');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'strict $.track.segments[*].location');</userinput>
jsonb_path_query
-------------------
[47.763, 13.4034]
instance, the following query using the <literal>.**</literal> accessor
selects every <literal>HR</literal> value twice:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'lax $.**.HR');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'lax $.**.HR');</userinput>
jsonb_path_query
------------------
73
the <literal>.**</literal> accessor only in strict mode. The
following query selects each <literal>HR</literal> value just once:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'strict $.**.HR');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'strict $.**.HR');</userinput>
jsonb_path_query
------------------
73
The unwrapping of arrays can also lead to unexpected results. Consider this
example, which selects all the <literal>location</literal> arrays:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'lax $.track.segments[*].location');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'lax $.track.segments[*].location');</userinput>
jsonb_path_query
-------------------
[47.763, 13.4034]
causes the arrays to be unwrapped to evaluate each item, returning only the
items that match the expression:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'lax $.track.segments[*].location ?(@[*] > 15)');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'lax $.track.segments[*].location ?(@[*] > 15)');</userinput>
jsonb_path_query
------------------
47.763
This despite the fact that the full arrays are selected by the path
expression. Use strict mode to restore selecting the arrays:
<screen>
-<prompt>=></prompt> <userinput>select jsonb_path_query(:'json', 'strict $.track.segments[*].location ?(@[*] > 15)');</userinput>
+<prompt>=></prompt> <userinput>SELECT jsonb_path_query(:'json', 'strict $.track.segments[*].location ?(@[*] > 15)');</userinput>
jsonb_path_query
-------------------
[47.763, 13.4034]
<returnvalue>2015-02-01</returnvalue>
</para>
<para>
- <literal>JSON_VALUE(jsonb '[1,2]', 'strict $[$off]' PASSING 1 as off)</literal>
+ <literal>JSON_VALUE(jsonb '[1,2]', 'strict $[$off]' PASSING 1 AS off)</literal>
<returnvalue>2</returnvalue>
</para>
<para>
<para>
Some examples, with <literal>#"</literal> delimiting the return string:
<programlisting>
-substring('foobar' similar '%#"o_b#"%' escape '#') <lineannotation>oob</lineannotation>
-substring('foobar' similar '#"o_b#"%' escape '#') <lineannotation>NULL</lineannotation>
+substring('foobar' SIMILAR '%#"o_b#"%' ESCAPE '#') <lineannotation>oob</lineannotation>
+substring('foobar' SIMILAR '#"o_b#"%' ESCAPE '#') <lineannotation>NULL</lineannotation>
</programlisting>
</para>
</sect2>
<para>
Some examples:
<programlisting>
-substring('foobar' from 'o.b') <lineannotation>oob</lineannotation>
-substring('foobar' from 'o(.)b') <lineannotation>o</lineannotation>
+substring('foobar' FROM 'o.b') <lineannotation>oob</lineannotation>
+substring('foobar' FROM 'o(.)b') <lineannotation>o</lineannotation>
</programlisting>
</para>
-- unnest a 2D array:
CREATE OR REPLACE FUNCTION unnest2(anyarray)
RETURNS SETOF anyelement AS $$
-select $1[i][j]
- from generate_subscripts($1,1) g1(i),
+SELECT $1[i][j]
+ FROM generate_subscripts($1,1) g1(i),
generate_subscripts($1,2) g2(j);
$$ LANGUAGE sql IMMUTABLE;
CREATE FUNCTION
of <parameter>newsubstring</parameter>.
</para>
<para>
- <literal>overlay('Txxxxas' placing 'hom' from 2 for 4)</literal>
+ <literal>overlay('Txxxxas' PLACING 'hom' FROM 2 FOR 4)</literal>
<returnvalue>Thomas</returnvalue>
</para></entry>
</row>
<parameter>string</parameter>, or zero if it's not present.
</para>
<para>
- <literal>position('om' in 'Thomas')</literal>
+ <literal>position('om' IN 'Thomas')</literal>
<returnvalue>3</returnvalue>
</para></entry>
</row>
and <parameter>count</parameter>.
</para>
<para>
- <literal>substring('Thomas' from 2 for 3)</literal>
+ <literal>substring('Thomas' FROM 2 FOR 3)</literal>
<returnvalue>hom</returnvalue>
</para>
<para>
- <literal>substring('Thomas' from 3)</literal>
+ <literal>substring('Thomas' FROM 3)</literal>
<returnvalue>omas</returnvalue>
</para>
<para>
- <literal>substring('Thomas' for 2)</literal>
+ <literal>substring('Thomas' FOR 2)</literal>
<returnvalue>Th</returnvalue>
</para></entry>
</row>
<xref linkend="functions-posix-regexp"/>.
</para>
<para>
- <literal>substring('Thomas' from '...$')</literal>
+ <literal>substring('Thomas' FROM '...$')</literal>
<returnvalue>mas</returnvalue>
</para></entry>
</row>
and should be considered obsolete.
</para>
<para>
- <literal>substring('Thomas' similar '%#"o_a#"_' escape '#')</literal>
+ <literal>substring('Thomas' SIMILAR '%#"o_a#"_' ESCAPE '#')</literal>
<returnvalue>oma</returnvalue>
</para></entry>
</row>
Expands a <type>tsvector</type> into a set of rows, one per lexeme.
</para>
<para>
- <literal>select * from unnest('cat:3 fat:2,4 rat:5A'::tsvector)</literal>
+ <literal>SELECT * FROM unnest('cat:3 fat:2,4 rat:5A'::tsvector)</literal>
<returnvalue></returnvalue>
<programlisting>
lexeme | positions | weights
<para>
Examples:
<screen><![CDATA[
-SELECT xmlelement(name foo);
+SELECT xmlelement(NAME foo);
xmlelement
------------
<foo/>
-SELECT xmlelement(name foo, xmlattributes('xyz' as bar));
+SELECT xmlelement(NAME foo, xmlattributes('xyz' AS bar));
xmlelement
------------------
<foo bar="xyz"/>
-SELECT xmlelement(name foo, xmlattributes(current_date as bar), 'cont', 'ent');
+SELECT xmlelement(NAME foo, xmlattributes(current_date AS bar), 'cont', 'ent');
xmlelement
-------------------------------------
<replaceable>HHHH</replaceable> is the character's Unicode
codepoint in hexadecimal notation. For example:
<screen><![CDATA[
-SELECT xmlelement(name "foo$bar", xmlattributes('xyz' as "a&b"));
+SELECT xmlelement(NAME "foo$bar", xmlattributes('xyz' AS "a&b"));
xmlelement
----------------------------------
valid:
<screen>
CREATE TABLE test (a xml, b xml);
-SELECT xmlelement(name test, xmlattributes(a, b)) FROM test;
+SELECT xmlelement(NAME test, xmlattributes(a, b)) FROM test;
</screen>
But these are not:
<screen>
-SELECT xmlelement(name test, xmlattributes('constant'), a, b) FROM test;
-SELECT xmlelement(name test, xmlattributes(func(a, b))) FROM test;
+SELECT xmlelement(NAME test, xmlattributes('constant'), a, b) FROM test;
+SELECT xmlelement(NAME test, xmlattributes(func(a, b))) FROM test;
</screen>
</para>
its data type. If the content is itself of type <type>xml</type>,
complex XML documents can be constructed. For example:
<screen><![CDATA[
-SELECT xmlelement(name foo, xmlattributes('xyz' as bar),
- xmlelement(name abc),
+SELECT xmlelement(NAME foo, xmlattributes('xyz' AS bar),
+ xmlelement(NAME abc),
xmlcomment('test'),
- xmlelement(name xyz));
+ xmlelement(NAME xyz));
xmlelement
----------------------------------------------
Extracts an <type>hstore</type>'s keys and values as a set of records.
</para>
<para>
- <literal>select * from each('a=>1,b=>2')</literal>
+ <literal>SELECT * FROM each('a=>1,b=>2')</literal>
<returnvalue></returnvalue>
<programlisting>
key | value
If multiple keys are to be added or changed in one operation,
the concatenation approach is more efficient than subscripting:
<programlisting>
-UPDATE tab SET h = h || hstore(array['q', 'w'], array['11', '12']);
+UPDATE tab SET h = h || hstore(ARRAY['q', 'w'], ARRAY['11', '12']);
</programlisting>
</para>
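<para>
For comparison, a minimal sketch of the same update written with subscripting
(assuming a server version in which <type>hstore</type> supports subscripting)
assigns each key individually:
<programlisting>
UPDATE tab SET h['q'] = '11', h['w'] = '12';
</programlisting>
</para>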
command to create the index would look like this:
<programlisting>
CREATE INDEX orders_unbilled_index ON orders (order_nr)
- WHERE billed is not true;
+ WHERE billed IS NOT TRUE;
</programlisting>
</para>
<para>
A possible query to use this index would be:
<programlisting>
-SELECT * FROM orders WHERE billed is not true AND order_nr < 10000;
+SELECT * FROM orders WHERE billed IS NOT TRUE AND order_nr < 10000;
</programlisting>
However, the index can also be used in queries that do not involve
<structfield>order_nr</structfield> at all, e.g.:
<programlisting>
-SELECT * FROM orders WHERE billed is not true AND amount > 5000.00;
+SELECT * FROM orders WHERE billed IS NOT TRUE AND amount > 5000.00;
</programlisting>
This is not as efficient as a partial index on the
<structfield>amount</structfield> column would be, since the system has to
--Casting types:
-- note that you can only cast from ean13 to another type when the
-- number would be valid in the realm of the target type;
--- thus, the following will NOT work: select isbn(ean13('0220356483481'));
+-- thus, the following will NOT work: SELECT isbn(ean13('0220356483481'));
-- but these will:
SELECT upc(ean13('0220356483481'));
SELECT ean13(upc('220356483481'));
/*
* Our test case here involves using a cursor, for which we must be inside
* a transaction block. We could do the whole thing with a single
- * PQexec() of "select * from pg_database", but that's too trivial to make
+ * PQexec() of "SELECT * FROM pg_database", but that's too trivial to make
* a good example.
*/
/*
* Fetch rows from pg_database, the system catalog of databases
*/
- res = PQexec(conn, "DECLARE myportal CURSOR FOR select * from pg_database");
+ res = PQexec(conn, "DECLARE myportal CURSOR FOR SELECT * FROM pg_database");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
fprintf(stderr, "DECLARE CURSOR failed: %s", PQerrorMessage(conn));
/* sub # */ CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a * 100) STORED);
/* sub # */ CREATE SUBSCRIPTION sub1 CONNECTION 'dbname=test_pub' PUBLICATION pub1;
-/* sub # */ SELECT * from tab_gen_to_gen;
+/* sub # */ SELECT * FROM tab_gen_to_gen;
a | b
---+----
1 | 100
-----+-----+------
(0 rows)
-postgres=# CREATE TABLE data(id serial primary key, data text);
+postgres=# CREATE TABLE data(id serial PRIMARY KEY, data text);
CREATE TABLE
postgres=# -- DDL isn't replicated, so all you'll see is the transaction
(3 rows)
postgres=# COMMIT PREPARED 'test_prepared1';
-postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NULL);
+postgres=# SELECT * FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL);
lsn | xid | data
------------+-----+--------------------------------------------
0/0168A060 | 529 | COMMIT PREPARED 'test_prepared1', txid 529
postgres=# BEGIN;
postgres=*# INSERT INTO data(data) VALUES('6');
postgres=*# PREPARE TRANSACTION 'test_prepared2';
-postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NULL);
+postgres=# SELECT * FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL);
lsn | xid | data
------------+-----+---------------------------------------------------------
0/0168A180 | 530 | BEGIN 530
(3 rows)
postgres=# ROLLBACK PREPARED 'test_prepared2';
-postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NULL);
+postgres=# SELECT * FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL);
lsn | xid | data
------------+-----+----------------------------------------------
0/0168A4B8 | 530 | ROLLBACK PREPARED 'test_prepared2', txid 530
at a specified position in a path:
<screen>
CREATE FUNCTION ins_label(ltree, int, text) RETURNS ltree
- AS 'select subpath($1,0,$2) || $3 || subpath($1,$2);'
+ AS 'SELECT subpath($1, 0, $2) || $3 || subpath($1, $2);'
LANGUAGE SQL IMMUTABLE;
ltreetest=> SELECT ins_label(path,2,'Space') FROM test WHERE path <@ 'Top.Science.Astronomy';
examine this information is to execute queries such as:
<programlisting>
-SELECT c.oid::regclass as table_name,
- greatest(age(c.relfrozenxid),age(t.relfrozenxid)) as age
+SELECT c.oid::regclass AS table_name,
+ greatest(age(c.relfrozenxid), age(t.relfrozenxid)) AS age
FROM pg_class c
LEFT JOIN pg_class t ON c.reltoastrelid = t.oid
WHERE c.relkind IN ('r', 'm');
Here are examples of how wait events can be viewed:
<programlisting>
-SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event is NOT NULL;
+SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event IS NOT NULL;
pid | wait_event_type | wait_event
------+-----------------+------------
2540 | Lock | relation
FROM pg_stat_activity a JOIN
pg_wait_events w ON (a.wait_event_type = w.type AND
a.wait_event = w.name)
- WHERE a.wait_event is NOT NULL and a.state = 'active';
+ WHERE a.wait_event IS NOT NULL AND a.state = 'active';
-[ RECORD 1 ]------------------------------------------------------&zwsp;------------
pid | 686674
wait_event | WALInitSync
<screen>
test=# SELECT magic, version, ntuples, ffactor, bsize, bmsize, bmshift,
test-# maxbucket, highmask, lowmask, ovflpoint, firstfree, nmaps, procid,
-test-# regexp_replace(spares::text, '(,0)*}', '}') as spares,
-test-# regexp_replace(mapp::text, '(,0)*}', '}') as mapp
+test-# regexp_replace(spares::text, '(,0)*}', '}') AS spares,
+test-# regexp_replace(mapp::text, '(,0)*}', '}') AS mapp
test-# FROM hash_metapage_info(get_raw_page('con_hash_index', 0));
-[ RECORD 1 ]-------------------------------------------------&zwsp;------------------------------
magic | 105121344
If you want the digest as a hexadecimal string, use
<function>encode()</function> on the result. For example:
<programlisting>
-CREATE OR REPLACE FUNCTION sha1(bytea) returns text AS $$
+CREATE OR REPLACE FUNCTION sha1(bytea) RETURNS text AS $$
SELECT encode(digest($1, 'sha1'), 'hex')
$$ LANGUAGE SQL STRICT IMMUTABLE;
</programlisting>
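<para>
As a usage sketch (assuming the <filename>pgcrypto</filename> extension is
installed and the wrapper above has been created), the function then returns
the digest as a hex string:
<programlisting>
SELECT sha1('hello');
                   sha1
------------------------------------------
 aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
(1 row)
</programlisting>
</para>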
<function>pgstathashindex</function> returns a record showing information
about a HASH index. For example:
<programlisting>
-test=> select * from pgstathashindex('con_hash_index');
+test=> SELECT * FROM pgstathashindex('con_hash_index');
-[ RECORD 1 ]--+-----------------
version | 4
bucket_pages | 33081
intended use of this function is to forcibly remove tuples that are not
otherwise accessible. For example:
<programlisting>
-test=> select * from t1 where ctid = '(0, 1)';
+test=> SELECT * FROM t1 WHERE ctid = '(0, 1)';
ERROR: could not access status of transaction 4007513275
DETAIL: Could not open file "pg_xact/0EED": No such file or directory.
-test=# select heap_force_kill('t1'::regclass, ARRAY['(0, 1)']::tid[]);
+test=# SELECT heap_force_kill('t1'::regclass, ARRAY['(0, 1)']::tid[]);
heap_force_kill
-----------------
(1 row)
-test=# select * from t1 where ctid = '(0, 1)';
+test=# SELECT * FROM t1 WHERE ctid = '(0, 1)';
(0 rows)
</programlisting>
ERROR: found xmin 507 from before relfrozenxid 515
CONTEXT: while scanning block 0 of relation "public.t1"
-test=# select ctid from t1 where xmin = 507;
+test=# SELECT ctid FROM t1 WHERE xmin = 507;
ctid
-------
(0,3)
(1 row)
-test=# select heap_force_freeze('t1'::regclass, ARRAY['(0, 3)']::tid[]);
+test=# SELECT heap_force_freeze('t1'::regclass, ARRAY['(0, 3)']::tid[]);
heap_force_freeze
-------------------
(1 row)
-test=# select ctid from t1 where xmin = 2;
+test=# SELECT ctid FROM t1 WHERE xmin = 2;
ctid
-------
(0,3)
<function>pg_mcv_list_items</function> set-returning function.
<programlisting>
-SELECT m.* FROM pg_statistic_ext join pg_statistic_ext_data on (oid = stxoid),
+SELECT m.* FROM pg_statistic_ext JOIN pg_statistic_ext_data ON (oid = stxoid),
pg_mcv_list_items(stxdmcv) m WHERE stxname = 'stts2';
index | values | nulls | frequency | base_frequency
-------+----------+-------+-----------+----------------
references to Perl arrays. Here is an example:
<programlisting>
-CREATE OR REPLACE function returns_array()
+CREATE OR REPLACE FUNCTION returns_array()
RETURNS text[][] AS $$
return [['a"b','c,d'],['e\\f','g']];
$$ LANGUAGE plperl;
-select returns_array();
+SELECT returns_array();
</programlisting>
</para>
INSERT INTO test (i, v) VALUES (4, 'immortal');
CREATE OR REPLACE FUNCTION test_munge() RETURNS SETOF test AS $$
- my $rv = spi_exec_query('select i, v from test;');
+ my $rv = spi_exec_query('SELECT i, v FROM test;');
my $status = $rv->{status};
my $nrows = $rv->{processed};
foreach my $rn (0 .. $nrows - 1) {
return;
$$ LANGUAGE plperlu;
-SELECT * from lotsa_md5(500);
+SELECT * FROM lotsa_md5(500);
</programlisting>
</para>
tax := subtotal * 0.06;
my_record.user_id := 20;
my_array[j] := 20;
-my_array[1:3] := array[1,2,3];
+my_array[1:3] := ARRAY[1, 2, 3];
complex_array[n].realpart = 12.3;
</programlisting>
</para>
within a <application>PL/pgSQL</application> function just by writing
the command. For example, you could create and fill a table by writing
<programlisting>
-CREATE TABLE mytable (id int primary key, data text);
+CREATE TABLE mytable (id int PRIMARY KEY, data text);
INSERT INTO mytable VALUES (1,'one'), (2,'two');
</programlisting>
</para>
<xref linkend="plpgsql-porting-ex2"/>.
For example:
<programlisting>
-a_output := a_output || '' if v_'' ||
- referrer_keys.kind || '' like ''''''''''
+a_output := a_output || '' IF v_'' ||
+ referrer_keys.kind || '' LIKE ''''''''''
|| referrer_keys.key_string || ''''''''''
- then return '''''' || referrer_keys.referrer_type
- || ''''''; end if;'';
+ THEN RETURN '''''' || referrer_keys.referrer_type
+ || ''''''; END IF;'';
</programlisting>
The value of <literal>a_output</literal> would then be:
<programlisting>
-if v_... like ''...'' then return ''...''; end if;
+IF v_... LIKE ''...'' THEN RETURN ''...''; END IF;
</programlisting>
</para>
<para>
In the dollar-quoting approach, this becomes:
<programlisting>
-a_output := a_output || $$ if v_$$ || referrer_keys.kind || $$ like '$$
+a_output := a_output || $$ IF v_$$ || referrer_keys.kind || $$ LIKE '$$
|| referrer_keys.key_string || $$'
- then return '$$ || referrer_keys.referrer_type
- || $$'; end if;$$;
+ THEN RETURN '$$ || referrer_keys.referrer_type
+ || $$'; END IF;$$;
</programlisting>
where we assume we only need to put single quote marks into
<literal>a_output</literal>, because it will be re-quoted before use.
<programlisting>
CREATE FUNCTION count_odd_iterator() RETURNS integer AS $$
odd = 0
-for row in plpy.cursor("select num from largetable"):
+for row in plpy.cursor("SELECT num FROM largetable"):
if row['num'] % 2:
odd += 1
return odd
CREATE FUNCTION count_odd_fetch(batch_size integer) RETURNS integer AS $$
odd = 0
-cursor = plpy.cursor("select num from largetable")
+cursor = plpy.cursor("SELECT num FROM largetable")
while True:
rows = cursor.fetch(batch_size)
if not rows:
CREATE FUNCTION count_odd_prepared() RETURNS integer AS $$
odd = 0
-plan = plpy.prepare("select num from largetable where num % $1 <> 0", ["integer"])
+plan = plpy.prepare("SELECT num FROM largetable WHERE num % $1 <> 0", ["integer"])
rows = list(plpy.cursor(plan, [2])) # or: = list(plan.cursor([2]))
return len(rows)
column names. Here is an example:
<programlisting>
-CREATE FUNCTION square_cube(in int, out squared int, out cubed int) AS $$
+CREATE FUNCTION square_cube(IN int, OUT squared int, OUT cubed int) AS $$
return [list squared [expr {$1 * $1}] cubed [expr {$1 * $1 * $1}]]
$$ LANGUAGE pltcl;
</programlisting>
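<para>
A quick usage sketch, calling the function with <literal>4</literal>, returns
one row containing both output columns:
<programlisting>
SELECT * FROM square_cube(4);
 squared | cubed
---------+-------
      16 |    64
(1 row)
</programlisting>
</para>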
FROM included_parts pr, parts p
WHERE p.part = pr.sub_part
)
-SELECT sub_part, SUM(quantity) as total_quantity
+SELECT sub_part, SUM(quantity) AS total_quantity
FROM included_parts
GROUP BY sub_part
</programlisting>
undesirable is
<programlisting>
WITH w AS (
- SELECT key, very_expensive_function(val) as f FROM some_table
+ SELECT key, very_expensive_function(val) AS f FROM some_table
)
SELECT * FROM w AS w1 JOIN w AS w2 ON w1.f = w2.f;
</programlisting>
<programlisting>
ALTER TABLE transactions
ADD COLUMN status varchar(30) DEFAULT 'old',
- ALTER COLUMN status SET default 'current';
+ ALTER COLUMN status SET DEFAULT 'current';
</programlisting>
Existing rows will be filled with <literal>old</literal>, but then
the default for subsequent commands will be <literal>current</literal>.
parameters. Thus for example these declarations conflict:
<programlisting>
CREATE FUNCTION foo(int) ...
-CREATE FUNCTION foo(int, out text) ...
+CREATE FUNCTION foo(int, OUT text) ...
</programlisting>
</para>
Add two integers using an SQL function:
<programlisting>
CREATE FUNCTION add(integer, integer) RETURNS integer
- AS 'select $1 + $2;'
+ AS 'SELECT $1 + $2;'
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
<para>
Return a record containing multiple output parameters:
<programlisting>
-CREATE FUNCTION dup(in int, out f1 int, out f2 text)
+CREATE FUNCTION dup(IN int, OUT f1 int, OUT f2 text)
AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
LANGUAGE SQL;
Create a range partitioned table:
<programlisting>
CREATE TABLE measurement (
- logdate date not null,
+ logdate date NOT NULL,
peaktemp int,
unitsales int
) PARTITION BY RANGE (logdate);
Create a range partitioned table with multiple columns in the partition key:
<programlisting>
CREATE TABLE measurement_year_month (
- logdate date not null,
+ logdate date NOT NULL,
peaktemp int,
unitsales int
) PARTITION BY RANGE (EXTRACT(YEAR FROM logdate), EXTRACT(MONTH FROM logdate));
Create a list partitioned table:
<programlisting>
CREATE TABLE cities (
- city_id bigserial not null,
- name text not null,
+ city_id bigserial NOT NULL,
+ name text NOT NULL,
population bigint
) PARTITION BY LIST (left(lower(name), 1));
</programlisting></para>
Create a hash partitioned table:
<programlisting>
CREATE TABLE orders (
- order_id bigint not null,
- cust_id bigint not null,
+ order_id bigint NOT NULL,
+ cust_id bigint NOT NULL,
status text
) PARTITION BY HASH (order_id);
</programlisting></para>
a role, named <literal>rewind_user</literal> here:
<programlisting>
CREATE USER rewind_user LOGIN;
-GRANT EXECUTE ON function pg_catalog.pg_ls_dir(text, boolean, boolean) TO rewind_user;
-GRANT EXECUTE ON function pg_catalog.pg_stat_file(text, boolean) TO rewind_user;
-GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text) TO rewind_user;
-GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean) TO rewind_user;
+GRANT EXECUTE ON FUNCTION pg_catalog.pg_ls_dir(text, boolean, boolean) TO rewind_user;
+GRANT EXECUTE ON FUNCTION pg_catalog.pg_stat_file(text, boolean) TO rewind_user;
+GRANT EXECUTE ON FUNCTION pg_catalog.pg_read_binary_file(text) TO rewind_user;
+GRANT EXECUTE ON FUNCTION pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean) TO rewind_user;
</programlisting>
</para>
statement to be executed. For example, to create an index on each
column of <structname>my_table</structname>:
<programlisting>
-=> <userinput>SELECT format('create index on my_table(%I)', attname)</userinput>
+=> <userinput>SELECT format('CREATE INDEX ON my_table (%I)', attname)</userinput>
-> <userinput>FROM pg_attribute</userinput>
-> <userinput>WHERE attrelid = 'my_table'::regclass AND attnum > 0</userinput>
-> <userinput>ORDER BY attnum</userinput>
-- check for the existence of two separate records in the database and store
-- the results in separate psql variables
SELECT
- EXISTS(SELECT 1 FROM customer WHERE customer_id = 123) as is_customer,
- EXISTS(SELECT 1 FROM employee WHERE employee_id = 456) as is_employee
+ EXISTS (SELECT 1 FROM customer WHERE customer_id = 123) AS is_customer,
+ EXISTS (SELECT 1 FROM employee WHERE employee_id = 456) AS is_employee
\gset
\if :is_customer
SELECT * FROM customer WHERE customer_id = 123;
server as soon as it reaches the command-ending semicolon, even if
more input remains on the current line. Thus for example entering
<programlisting>
-select 1; select 2; select 3;
+SELECT 1; SELECT 2; SELECT 3;
</programlisting>
will result in the three SQL commands being individually sent to
the server, with each one's results being displayed before
command before it and the one after are effectively combined and
sent to the server in one request. So for example
<programlisting>
-select 1\; select 2\; select 3;
+SELECT 1\; SELECT 2\; SELECT 3;
</programlisting>
results in sending the three SQL commands to the server in a single
request, when the non-backslashed semicolon is reached.
input. Notice the changing prompt:
<programlisting>
testdb=> <userinput>CREATE TABLE my_table (</userinput>
-testdb(> <userinput> first integer not null default 0,</userinput>
+testdb(> <userinput> first integer NOT NULL DEFAULT 0,</userinput>
testdb(> <userinput> second text)</userinput>
testdb-> <userinput>;</userinput>
CREATE TABLE
This second example shows a multiplication table with rows sorted in reverse
numerical order and columns with an independent, ascending numerical order.
<programlisting>
-testdb=> <userinput>SELECT t1.first as "A", t2.first+100 AS "B", t1.first*(t2.first+100) as "AxB",</userinput>
-testdb-> <userinput>row_number() over(order by t2.first) AS ord</userinput>
+testdb=> <userinput>SELECT t1.first AS "A", t2.first+100 AS "B", t1.first*(t2.first+100) AS "AxB",</userinput>
+testdb-> <userinput>row_number() OVER (ORDER BY t2.first) AS ord</userinput>
testdb-> <userinput>FROM my_table t1 CROSS JOIN my_table t2 ORDER BY 1 DESC</userinput>
testdb-> <userinput>\crosstabview "A" "B" "AxB" ord</userinput>
A | 101 | 102 | 103 | 104
<programlisting>
WITH t AS (
- SELECT random() as x FROM generate_series(1, 3)
+ SELECT random() AS x FROM generate_series(1, 3)
)
SELECT * FROM t
UNION ALL
SELECT
seller_no,
invoice_date,
- sum(invoice_amt)::numeric(13,2) as sales_amt
+ sum(invoice_amt)::numeric(13,2) AS sales_amt
FROM invoice
WHERE invoice_date < CURRENT_DATE
GROUP BY
when you fetch it? Watch:
<screen>
-test=> select 6.50 :: float8 as "pH";
+test=> SELECT 6.50::float8 AS "pH";
pH
---
6.5
Check this out:
<screen>
-test=> select '6.25 .. 6.50'::seg as "pH";
+test=> SELECT '6.25 .. 6.50'::seg AS "pH";
pH
------------
6.25 .. 6.50
boundary if the resulting interval includes a power of ten:
<screen>
-postgres=> select '10(+-)1'::seg as seg;
+postgres=> SELECT '10(+-)1'::seg AS seg;
seg
---------
9.0 .. 11 -- should be: 9 .. 11
the original one. For example:
</para>
<screen>
-regression=# select sepgsql_getcon();
+regression=# SELECT sepgsql_getcon();
sepgsql_getcon
-------------------------------------------------------
unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023
SELECT *
FROM crosstab(
- 'select rowid, attribute, value
- from ct
- where attribute = ''att2'' or attribute = ''att3''
- order by 1,2')
+ 'SELECT rowid, attribute, value
+ FROM ct
+ WHERE attribute = ''att2'' OR attribute = ''att3''
+ ORDER BY 1, 2')
AS ct(row_name text, category_1 text, category_2 text, category_3 text);
row_name | category_1 | category_2 | category_3
<programlisting>
SELECT *
FROM crosstab3(
- 'select rowid, attribute, value
- from ct
- where attribute = ''att2'' or attribute = ''att3''
- order by 1,2');
+ 'SELECT rowid, attribute, value
+ FROM ct
+ WHERE attribute = ''att2'' OR attribute = ''att3''
+ ORDER BY 1, 2');
</programlisting>
</para>
);
CREATE OR REPLACE FUNCTION crosstab_float8_5_cols(text)
- RETURNS setof my_crosstab_float8_5_cols
+ RETURNS SETOF my_crosstab_float8_5_cols
AS '$libdir/tablefunc','crosstab' LANGUAGE C STABLE STRICT;
</programlisting>
</para>
OUT my_category_3 float8,
OUT my_category_4 float8,
OUT my_category_5 float8)
- RETURNS setof record
+ RETURNS SETOF record
AS '$libdir/tablefunc','crosstab' LANGUAGE C STABLE STRICT;
</programlisting>
</para>
<para>
Here are two complete examples:
<programlisting>
-create table sales(year int, month int, qty int);
-insert into sales values(2007, 1, 1000);
-insert into sales values(2007, 2, 1500);
-insert into sales values(2007, 7, 500);
-insert into sales values(2007, 11, 1500);
-insert into sales values(2007, 12, 2000);
-insert into sales values(2008, 1, 1000);
-
-select * from crosstab(
- 'select year, month, qty from sales order by 1',
- 'select m from generate_series(1,12) m'
-) as (
+CREATE TABLE sales (year int, month int, qty int);
+INSERT INTO sales VALUES (2007, 1, 1000);
+INSERT INTO sales VALUES (2007, 2, 1500);
+INSERT INTO sales VALUES (2007, 7, 500);
+INSERT INTO sales VALUES (2007, 11, 1500);
+INSERT INTO sales VALUES (2007, 12, 2000);
+INSERT INTO sales VALUES (2008, 1, 1000);
+
+SELECT * FROM crosstab(
+ 'SELECT year, month, qty FROM sales ORDER BY 1',
+ 'SELECT m FROM generate_series(1, 12) m'
+) AS (
year int,
"Jan" int,
"Feb" int,
A brief example of using the extension follows.
<programlisting>
-test=# create table tcndata
+test=# CREATE TABLE tcndata
test-# (
-test(# a int not null,
-test(# b date not null,
+test(# a int NOT NULL,
+test(# b date NOT NULL,
test(# c text,
-test(# primary key (a, b)
+test(# PRIMARY KEY (a, b)
test(# );
CREATE TABLE
-test=# create trigger tcndata_tcn_trigger
-test-# after insert or update or delete on tcndata
-test-# for each row execute function triggered_change_notification();
+test=# CREATE TRIGGER tcndata_tcn_trigger
+test-# AFTER INSERT OR UPDATE OR DELETE ON tcndata
+test-# FOR EACH ROW EXECUTE FUNCTION triggered_change_notification();
CREATE TRIGGER
-test=# listen tcn;
+test=# LISTEN tcn;
LISTEN
-test=# insert into tcndata values (1, date '2012-12-22', 'one'),
+test=# INSERT INTO tcndata VALUES (1, date '2012-12-22', 'one'),
test-# (1, date '2012-12-23', 'another'),
test-# (2, date '2012-12-23', 'two');
INSERT 0 3
Asynchronous notification "tcn" with payload ""tcndata",I,"a"='1',"b"='2012-12-22'" received from server process with PID 22770.
Asynchronous notification "tcn" with payload ""tcndata",I,"a"='1',"b"='2012-12-23'" received from server process with PID 22770.
Asynchronous notification "tcn" with payload ""tcndata",I,"a"='2',"b"='2012-12-23'" received from server process with PID 22770.
-test=# update tcndata set c = 'uno' where a = 1;
+test=# UPDATE tcndata SET c = 'uno' WHERE a = 1;
UPDATE 2
Asynchronous notification "tcn" with payload ""tcndata",U,"a"='1',"b"='2012-12-22'" received from server process with PID 22770.
Asynchronous notification "tcn" with payload ""tcndata",U,"a"='1',"b"='2012-12-23'" received from server process with PID 22770.
-test=# delete from tcndata where a = 1 and b = date '2012-12-22';
+test=# DELETE FROM tcndata WHERE a = 1 AND b = date '2012-12-22';
DELETE 1
Asynchronous notification "tcn" with payload ""tcndata",D,"a"='1',"b"='2012-12-22'" received from server process with PID 22770.
</programlisting>
<programlisting>
CREATE FUNCTION messages_trigger() RETURNS trigger AS $$
-begin
+BEGIN
new.tsv :=
setweight(to_tsvector('pg_catalog.english', coalesce(new.title,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.body,'')), 'D');
- return new;
-end
+ RETURN new;
+END
$$ LANGUAGE plpgsql;
CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE
Here is another example of resolving an operator with one known and one
unknown input:
<screen>
-SELECT array[1,2] <@ '{1,2,3}' as "is subset";
+SELECT ARRAY[1, 2] <@ '{1,2,3}' AS "is subset";
is subset
-----------
<para>
To test the dictionary, you can try:
<programlisting>
-mydb=# select ts_lexize('unaccent','Hôtel');
+mydb=# SELECT ts_lexize('unaccent', 'Hôtel');
ts_lexize
-----------
{Hotel}
mydb=# ALTER TEXT SEARCH CONFIGURATION fr
ALTER MAPPING FOR hword, hword_part, word
WITH unaccent, french_stem;
-mydb=# select to_tsvector('fr','Hôtels de la Mer');
+mydb=# SELECT to_tsvector('fr', 'Hôtels de la Mer');
to_tsvector
-------------------
'hotel':1 'mer':4
(1 row)
-mydb=# select to_tsvector('fr','Hôtel de la Mer') @@ to_tsquery('fr','Hotels');
+mydb=# SELECT to_tsvector('fr', 'Hôtel de la Mer') @@ to_tsquery('fr', 'Hotels');
?column?
----------
t
(1 row)
-mydb=# select ts_headline('fr','Hôtel de la Mer',to_tsquery('fr','Hotels'));
+mydb=# SELECT ts_headline('fr', 'Hôtel de la Mer', to_tsquery('fr', 'Hotels'));
ts_headline
------------------------
<b>Hôtel</b> de la Mer
For example:
<screen>
CREATE FUNCTION dup (f1 anyelement, OUT f2 anyelement, OUT f3 anyarray)
-AS 'select $1, array[$1,$1]' LANGUAGE SQL;
+AS 'SELECT $1, ARRAY[$1, $1]' LANGUAGE SQL;
SELECT * FROM dup(22);
f2 | f3