*/
if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
{
+ Path *cheapest_path;
+
+ /*
+ * If there is an indication of how many tuples the query needs to
+ * return, try to choose the path that is optimal for that specific
+ * number of tuples.
+ */
+ if (root->tuple_fraction > 0.0)
+ cheapest_path =
+ get_cheapest_fractional_path(childrel,
+ root->tuple_fraction);
+ else
+ cheapest_path = childrel->cheapest_startup_path;
+
/* cheapest_startup_path must not be a parameterized path. */
- Assert(childrel->cheapest_startup_path->param_info == NULL);
- accumulate_append_subpath(childrel->cheapest_startup_path,
+ Assert(cheapest_path->param_info == NULL);
+ accumulate_append_subpath(cheapest_path,
&startup_subpaths,
NULL);
}
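The branch above relies on the grouping_planner convention for root->tuple_fraction: 0.0 means the whole result set is wanted, a value strictly between 0 and 1 is a fraction of the expected result size, and a value of 1.0 or more is an absolute tuple count (for example from a constant LIMIT). A minimal sketch of that mapping, using a hypothetical helper that is not part of PostgreSQL:

/*
 * Illustration only, not part of the patch: map a tuple_fraction that
 * follows the grouping_planner convention onto a plain fraction of the
 * given row estimate.
 */
static double
normalize_tuple_fraction(double tuple_fraction, double rows)
{
	if (tuple_fraction <= 0.0)
		return 1.0;				/* 0.0 means "fetch everything" */
	if (tuple_fraction >= 1.0)
	{
		/* >= 1.0 is an absolute tuple count, e.g. from a constant LIMIT */
		if (rows <= 0.0 || tuple_fraction >= rows)
			return 1.0;
		return tuple_fraction / rows;
	}
	return tuple_fraction;		/* already a fraction in (0, 1) */
}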
* Find the cheapest path for retrieving a specified fraction of all
* the tuples expected to be returned by the given relation.
*
+ * Do not consider parameterized paths. If the caller needs a path for an
+ * upper rel, it cannot be parameterized. If the caller needs an append
+ * subpath, requiring every subpath to share the same parameterization
+ * would be too restrictive.
+ *
* We interpret tuple_fraction the same way as grouping_planner.
*
* We assume set_cheapest() has been run on the given rel.
{
Path *path = (Path *) lfirst(l);
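+ /* Skip parameterized paths, per the header comment above. */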
+ if (path->param_info)
+ continue;
+
if (path == rel->cheapest_total_path ||
compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
continue;
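For context, a simplified sketch of the fractional cost comparison the loop above relies on: compare_fractional_path_costs() interpolates a path's cost linearly between its startup and total cost for fractions in (0, 1), and falls back to comparing total costs otherwise. This is why a low-startup SeqScan plan can win for LIMIT 1 while an index-based plan wins once more rows are requested, as the new regression tests below show. PathCosts and fractional_cost() are illustrative stand-ins, not PostgreSQL code.

/* Stand-in for the startup/total cost fields of a Path node. */
typedef struct PathCosts
{
	double		startup_cost;	/* cost before the first tuple is returned */
	double		total_cost;		/* cost to return all tuples */
} PathCosts;

/*
 * Simplified sketch: estimated cost of fetching the given fraction of a
 * path's output, interpolated between startup and total cost.
 */
static double
fractional_cost(const PathCosts *p, double fraction)
{
	if (fraction <= 0.0 || fraction >= 1.0)
		return p->total_cost;	/* outside (0, 1), compare full costs */
	return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}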
Index Cond: (id = x_2.id)
(11 rows)
+--
+-- Test Append's fractional paths
+--
+CREATE INDEX pht1_c_idx ON pht1(c);
+-- A SeqScan might be the best choice if we need only a single tuple
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 1;
+ QUERY PLAN
+--------------------------------------------------
+ Limit
+ -> Append
+ -> Nested Loop
+ Join Filter: (p1_1.c = p2_1.c)
+ -> Seq Scan on pht1_p1 p1_1
+ -> Materialize
+ -> Seq Scan on pht1_p1 p2_1
+ -> Nested Loop
+ Join Filter: (p1_2.c = p2_2.c)
+ -> Seq Scan on pht1_p2 p1_2
+ -> Materialize
+ -> Seq Scan on pht1_p2 p2_2
+ -> Nested Loop
+ Join Filter: (p1_3.c = p2_3.c)
+ -> Seq Scan on pht1_p3 p1_3
+ -> Materialize
+ -> Seq Scan on pht1_p3 p2_3
+(17 rows)
+
+-- Increase the number of tuples requested and an IndexScan will be chosen
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 100;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Limit
+ -> Append
+ -> Nested Loop
+ -> Seq Scan on pht1_p1 p1_1
+ -> Memoize
+ Cache Key: p1_1.c
+ Cache Mode: logical
+ -> Index Scan using pht1_p1_c_idx on pht1_p1 p2_1
+ Index Cond: (c = p1_1.c)
+ -> Nested Loop
+ -> Seq Scan on pht1_p2 p1_2
+ -> Memoize
+ Cache Key: p1_2.c
+ Cache Mode: logical
+ -> Index Scan using pht1_p2_c_idx on pht1_p2 p2_2
+ Index Cond: (c = p1_2.c)
+ -> Nested Loop
+ -> Seq Scan on pht1_p3 p1_3
+ -> Memoize
+ Cache Key: p1_3.c
+ Cache Mode: logical
+ -> Index Scan using pht1_p3_c_idx on pht1_p3 p2_3
+ Index Cond: (c = p1_3.c)
+(23 rows)
+
+-- If almost all the data must be fetched, prefer a SeqScan
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 1000;
+ QUERY PLAN
+--------------------------------------------------
+ Limit
+ -> Append
+ -> Hash Join
+ Hash Cond: (p1_1.c = p2_1.c)
+ -> Seq Scan on pht1_p1 p1_1
+ -> Hash
+ -> Seq Scan on pht1_p1 p2_1
+ -> Hash Join
+ Hash Cond: (p1_2.c = p2_2.c)
+ -> Seq Scan on pht1_p2 p1_2
+ -> Hash
+ -> Seq Scan on pht1_p2 p2_2
+ -> Hash Join
+ Hash Cond: (p1_3.c = p2_3.c)
+ -> Seq Scan on pht1_p3 p1_3
+ -> Hash
+ -> Seq Scan on pht1_p3 p2_3
+(17 rows)
+
+SET max_parallel_workers_per_gather = 1;
+SET debug_parallel_query = on;
+-- Partial paths should also be smart enough to take the limit into account
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 100;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Gather
+ Workers Planned: 1
+ Single Copy: true
+ -> Limit
+ -> Append
+ -> Nested Loop
+ -> Seq Scan on pht1_p1 p1_1
+ -> Memoize
+ Cache Key: p1_1.c
+ Cache Mode: logical
+ -> Index Scan using pht1_p1_c_idx on pht1_p1 p2_1
+ Index Cond: (c = p1_1.c)
+ -> Nested Loop
+ -> Seq Scan on pht1_p2 p1_2
+ -> Memoize
+ Cache Key: p1_2.c
+ Cache Mode: logical
+ -> Index Scan using pht1_p2_c_idx on pht1_p2 p2_2
+ Index Cond: (c = p1_2.c)
+ -> Nested Loop
+ -> Seq Scan on pht1_p3 p1_3
+ -> Memoize
+ Cache Key: p1_3.c
+ Cache Mode: logical
+ -> Index Scan using pht1_p3_c_idx on pht1_p3 p2_3
+ Index Cond: (c = p1_3.c)
+(26 rows)
+
+RESET debug_parallel_query;
+-- Remove indexes from the partitioned table and its partitions
+DROP INDEX pht1_c_idx CASCADE;
-- cleanup
DROP TABLE fract_t;
RESET max_parallel_workers_per_gather;
EXPLAIN (COSTS OFF)
SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id DESC LIMIT 10;
+--
+-- Test Append's fractional paths
+--
+
+CREATE INDEX pht1_c_idx ON pht1(c);
+-- A SeqScan might be the best choice if we need only a single tuple
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 1;
+-- Increase the number of tuples requested and an IndexScan will be chosen
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 100;
+-- If almost all the data must be fetched, prefer a SeqScan
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 1000;
+
+SET max_parallel_workers_per_gather = 1;
+SET debug_parallel_query = on;
+-- Partial paths should also be smart enough to take the limit into account
+EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 100;
+RESET debug_parallel_query;
+
+-- Remove indexes from the partitioned table and its partitions
+DROP INDEX pht1_c_idx CASCADE;
+
-- cleanup
DROP TABLE fract_t;