From: Robert Haas
Date: Tue, 10 Feb 2026 22:55:59 +0000 (-0500)
Subject: Store information about Append node consolidation in the final plan.
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7358abcc6076f4b2530d10126ab379f8aea612a5;p=thirdparty%2Fpostgresql.git

Store information about Append node consolidation in the final plan.

An extension (or core code) might want to reconstruct the planner's
decisions about whether and where to perform partitionwise joins from
the final plan. To do so, it must be possible to find all of the RTIs
of partitioned tables appearing in the plan. But when an AppendPath or
MergeAppendPath pulls up child paths from a subordinate AppendPath or
MergeAppendPath, the RTIs of the subordinate path do not appear in the
final plan, making this kind of reconstruction impossible.

To avoid this, propagate into the surviving Append or MergeAppend node
the RTI sets that would have been present in the 'apprelids' field of
the subordinate Append or MergeAppend nodes that would otherwise have
been created, using a new 'child_append_relid_sets' field for that
purpose. The value of this field is a list of Bitmapsets, because each
relation whose append-list was pulled up had its own set of RTIs: just
one, if it was a partitionwise scan, or more than one, if it was a
partitionwise join. Since our goal is to see where partitionwise joins
were done, it is essential to avoid losing the information about how
the RTIs were grouped in the pulled-up relations.

This commit also updates pg_overexplain so that EXPLAIN (RANGE_TABLE)
will display the saved RTI sets.

Co-authored-by: Robert Haas
Co-authored-by: Lukas Fittl
Reviewed-by: Lukas Fittl
Reviewed-by: Jakub Wartak
Reviewed-by: Greg Burd
Reviewed-by: Jacob Champion
Reviewed-by: Amit Langote
Reviewed-by: Haibo Yan
Reviewed-by: Alexandra Wang
Discussion: http://postgr.es/m/CA+TgmoZ-Jh1T6QyWoCODMVQdhTUPYkaZjWztzP1En4=ZHoKPzw@mail.gmail.com
---

diff --git a/contrib/pg_overexplain/expected/pg_overexplain.out b/contrib/pg_overexplain/expected/pg_overexplain.out
index 198bbe10d73..f376d2e7996 100644
--- a/contrib/pg_overexplain/expected/pg_overexplain.out
+++ b/contrib/pg_overexplain/expected/pg_overexplain.out
@@ -104,6 +104,7 @@ $$);
    Parallel Safe: true
    Plan Node ID: 2
    Append RTIs: 1
+   Child Append RTIs: none
    ->  Seq Scan on brassica vegetables_1
          Disabled Nodes: 0
          Parallel Safe: true
@@ -142,7 +143,7 @@ $$);
    Relation Kind: relation
    Relation Lock Mode: AccessShareLock
    Unprunable RTIs: 1 3 4
-(53 rows)
+(54 rows)
 
 -- Test a different output format.
SELECT explain_filter($$ @@ -197,6 +198,7 @@ $$); none + none + 1 + + none + 0 + + + @@ -507,6 +509,7 @@ SELECT * FROM vegetables v, Elided Node RTIs: 2 -> Append Append RTIs: 1 + Child Append RTIs: none -> Seq Scan on brassica v_1 Scan RTI: 3 -> Seq Scan on daucus v_2 @@ -548,7 +551,7 @@ SELECT * FROM vegetables v, Relation Kind: relation Relation Lock Mode: AccessShareLock Unprunable RTIs: 1 3 4 5 6 -(51 rows) +(52 rows) -- should show "Subplan: unnamed_subquery" EXPLAIN (RANGE_TABLE, COSTS OFF) @@ -566,6 +569,7 @@ SELECT * FROM vegetables v, Elided Node RTIs: 2 -> Append Append RTIs: 1 + Child Append RTIs: none -> Seq Scan on brassica v_1 Scan RTI: 3 -> Seq Scan on daucus v_2 @@ -606,5 +610,5 @@ SELECT * FROM vegetables v, Relation Kind: relation Relation Lock Mode: AccessShareLock Unprunable RTIs: 1 3 4 5 6 -(50 rows) +(51 rows) diff --git a/contrib/pg_overexplain/pg_overexplain.c b/contrib/pg_overexplain/pg_overexplain.c index e0184ba314a..36e6aac0e2c 100644 --- a/contrib/pg_overexplain/pg_overexplain.c +++ b/contrib/pg_overexplain/pg_overexplain.c @@ -54,6 +54,8 @@ static void overexplain_alias(const char *qlabel, Alias *alias, ExplainState *es); static void overexplain_bitmapset(const char *qlabel, Bitmapset *bms, ExplainState *es); +static void overexplain_bitmapset_list(const char *qlabel, List *bms_list, + ExplainState *es); static void overexplain_intlist(const char *qlabel, List *list, ExplainState *es); @@ -232,11 +234,17 @@ overexplain_per_node_hook(PlanState *planstate, List *ancestors, overexplain_bitmapset("Append RTIs", ((Append *) plan)->apprelids, es); + overexplain_bitmapset_list("Child Append RTIs", + ((Append *) plan)->child_append_relid_sets, + es); break; case T_MergeAppend: overexplain_bitmapset("Append RTIs", ((MergeAppend *) plan)->apprelids, es); + overexplain_bitmapset_list("Child Append RTIs", + ((MergeAppend *) plan)->child_append_relid_sets, + es); break; case T_Result: @@ -815,6 +823,54 @@ overexplain_bitmapset(const char *qlabel, Bitmapset *bms, ExplainState *es) pfree(buf.data); } +/* + * Emit a text property describing the contents of a list of bitmapsets. + * If a bitmapset contains exactly 1 member, we just print an integer; + * otherwise, we surround the list of members by parentheses. + * + * If there are no bitmapsets in the list, we print the word "none". 
+ */ +static void +overexplain_bitmapset_list(const char *qlabel, List *bms_list, + ExplainState *es) +{ + StringInfoData buf; + + initStringInfo(&buf); + + foreach_node(Bitmapset, bms, bms_list) + { + if (bms_membership(bms) == BMS_SINGLETON) + appendStringInfo(&buf, " %d", bms_singleton_member(bms)); + else + { + int x = -1; + bool first = true; + + appendStringInfoString(&buf, " ("); + while ((x = bms_next_member(bms, x)) >= 0) + { + if (first) + first = false; + else + appendStringInfoChar(&buf, ' '); + appendStringInfo(&buf, "%d", x); + } + appendStringInfoChar(&buf, ')'); + } + } + + if (buf.len == 0) + { + ExplainPropertyText(qlabel, "none", es); + return; + } + + Assert(buf.data[0] == ' '); + ExplainPropertyText(qlabel, buf.data + 1, es); + pfree(buf.data); +} + /* * Emit a text property describing the contents of a list of integers, OIDs, * or XIDs -- either a space-separated list of integer members, or the word diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index b4581e54d93..90275e25872 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -128,8 +128,10 @@ static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, Relids required_outer); static void accumulate_append_subpath(Path *path, List **subpaths, - List **special_subpaths); -static Path *get_singleton_append_subpath(Path *path); + List **special_subpaths, + List **child_append_relid_sets); +static Path *get_singleton_append_subpath(Path *path, + List **child_append_relid_sets); static void set_dummy_rel_pathlist(RelOptInfo *rel); static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte); @@ -1404,22 +1406,21 @@ void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels) { - List *subpaths = NIL; - bool subpaths_valid = true; - List *startup_subpaths = NIL; - bool startup_subpaths_valid = true; - List *partial_subpaths = NIL; - List *pa_partial_subpaths = NIL; - List *pa_nonpartial_subpaths = NIL; - bool partial_subpaths_valid = true; - bool pa_subpaths_valid; + AppendPathInput unparameterized = {0}; + AppendPathInput startup = {0}; + AppendPathInput partial_only = {0}; + AppendPathInput parallel_append = {0}; + bool unparameterized_valid = true; + bool startup_valid = true; + bool partial_only_valid = true; + bool parallel_append_valid = true; List *all_child_pathkeys = NIL; List *all_child_outers = NIL; ListCell *l; double partial_rows = -1; /* If appropriate, consider parallel append */ - pa_subpaths_valid = enable_parallel_append && rel->consider_parallel; + parallel_append_valid = enable_parallel_append && rel->consider_parallel; /* * For every non-dummy child, remember the cheapest path. Also, identify @@ -1443,9 +1444,9 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, if (childrel->pathlist != NIL && childrel->cheapest_total_path->param_info == NULL) accumulate_append_subpath(childrel->cheapest_total_path, - &subpaths, NULL); + &unparameterized.subpaths, NULL, &unparameterized.child_append_relid_sets); else - subpaths_valid = false; + unparameterized_valid = false; /* * When the planner is considering cheap startup plans, we'll also @@ -1471,11 +1472,12 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, /* cheapest_startup_path must not be a parameterized path. 
*/ Assert(cheapest_path->param_info == NULL); accumulate_append_subpath(cheapest_path, - &startup_subpaths, - NULL); + &startup.subpaths, + NULL, + &startup.child_append_relid_sets); } else - startup_subpaths_valid = false; + startup_valid = false; /* Same idea, but for a partial plan. */ @@ -1483,16 +1485,17 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, { cheapest_partial_path = linitial(childrel->partial_pathlist); accumulate_append_subpath(cheapest_partial_path, - &partial_subpaths, NULL); + &partial_only.partial_subpaths, NULL, + &partial_only.child_append_relid_sets); } else - partial_subpaths_valid = false; + partial_only_valid = false; /* * Same idea, but for a parallel append mixing partial and non-partial * paths. */ - if (pa_subpaths_valid) + if (parallel_append_valid) { Path *nppath = NULL; @@ -1502,7 +1505,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, if (cheapest_partial_path == NULL && nppath == NULL) { /* Neither a partial nor a parallel-safe path? Forget it. */ - pa_subpaths_valid = false; + parallel_append_valid = false; } else if (nppath == NULL || (cheapest_partial_path != NULL && @@ -1511,8 +1514,9 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, /* Partial path is cheaper or the only option. */ Assert(cheapest_partial_path != NULL); accumulate_append_subpath(cheapest_partial_path, - &pa_partial_subpaths, - &pa_nonpartial_subpaths); + ¶llel_append.partial_subpaths, + ¶llel_append.subpaths, + ¶llel_append.child_append_relid_sets); } else { @@ -1530,8 +1534,9 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, * figure that out. */ accumulate_append_subpath(nppath, - &pa_nonpartial_subpaths, - NULL); + ¶llel_append.subpaths, + NULL, + ¶llel_append.child_append_relid_sets); } } @@ -1605,28 +1610,28 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, * unparameterized Append path for the rel. (Note: this is correct even * if we have zero or one live subpath due to constraint exclusion.) */ - if (subpaths_valid) - add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL, + if (unparameterized_valid) + add_path(rel, (Path *) create_append_path(root, rel, unparameterized, NIL, NULL, 0, false, -1)); /* build an AppendPath for the cheap startup paths, if valid */ - if (startup_subpaths_valid) - add_path(rel, (Path *) create_append_path(root, rel, startup_subpaths, - NIL, NIL, NULL, 0, false, -1)); + if (startup_valid) + add_path(rel, (Path *) create_append_path(root, rel, startup, + NIL, NULL, 0, false, -1)); /* * Consider an append of unordered, unparameterized partial paths. Make * it parallel-aware if possible. */ - if (partial_subpaths_valid && partial_subpaths != NIL) + if (partial_only_valid && partial_only.partial_subpaths != NIL) { AppendPath *appendpath; ListCell *lc; int parallel_workers = 0; /* Find the highest number of workers requested for any subpath. */ - foreach(lc, partial_subpaths) + foreach(lc, partial_only.partial_subpaths) { Path *path = lfirst(lc); @@ -1653,7 +1658,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, Assert(parallel_workers > 0); /* Generate a partial append path. 
*/ - appendpath = create_append_path(root, rel, NIL, partial_subpaths, + appendpath = create_append_path(root, rel, partial_only, NIL, NULL, parallel_workers, enable_parallel_append, -1); @@ -1674,7 +1679,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, * a non-partial path that is substantially cheaper than any partial path; * otherwise, we should use the append path added in the previous step.) */ - if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL) + if (parallel_append_valid && parallel_append.subpaths != NIL) { AppendPath *appendpath; ListCell *lc; @@ -1684,7 +1689,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, * Find the highest number of workers requested for any partial * subpath. */ - foreach(lc, pa_partial_subpaths) + foreach(lc, parallel_append.partial_subpaths) { Path *path = lfirst(lc); @@ -1702,8 +1707,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, max_parallel_workers_per_gather); Assert(parallel_workers > 0); - appendpath = create_append_path(root, rel, pa_nonpartial_subpaths, - pa_partial_subpaths, + appendpath = create_append_path(root, rel, parallel_append, NIL, NULL, parallel_workers, true, partial_rows); add_partial_path(rel, (Path *) appendpath); @@ -1713,7 +1717,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, * Also build unparameterized ordered append paths based on the collected * list of child pathkeys. */ - if (subpaths_valid) + if (unparameterized_valid) generate_orderedappend_paths(root, rel, live_childrels, all_child_pathkeys); @@ -1734,10 +1738,10 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, { Relids required_outer = (Relids) lfirst(l); ListCell *lcr; + AppendPathInput parameterized = {0}; + bool parameterized_valid = true; /* Select the child paths for an Append with this parameterization */ - subpaths = NIL; - subpaths_valid = true; foreach(lcr, live_childrels) { RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); @@ -1746,7 +1750,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, if (childrel->pathlist == NIL) { /* failed to make a suitable path for this child */ - subpaths_valid = false; + parameterized_valid = false; break; } @@ -1756,15 +1760,16 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, if (subpath == NULL) { /* failed to make a suitable path for this child */ - subpaths_valid = false; + parameterized_valid = false; break; } - accumulate_append_subpath(subpath, &subpaths, NULL); + accumulate_append_subpath(subpath, ¶meterized.subpaths, NULL, + ¶meterized.child_append_relid_sets); } - if (subpaths_valid) + if (parameterized_valid) add_path(rel, (Path *) - create_append_path(root, rel, subpaths, NIL, + create_append_path(root, rel, parameterized, NIL, required_outer, 0, false, -1)); } @@ -1785,13 +1790,14 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, { Path *path = (Path *) lfirst(l); AppendPath *appendpath; + AppendPathInput append = {0}; /* skip paths with no pathkeys. 
*/ if (path->pathkeys == NIL) continue; - appendpath = create_append_path(root, rel, NIL, list_make1(path), - NIL, NULL, + append.partial_subpaths = list_make1(path); + appendpath = create_append_path(root, rel, append, NIL, NULL, path->parallel_workers, true, partial_rows); add_partial_path(rel, (Path *) appendpath); @@ -1873,9 +1879,9 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, foreach(lcp, all_child_pathkeys) { List *pathkeys = (List *) lfirst(lcp); - List *startup_subpaths = NIL; - List *total_subpaths = NIL; - List *fractional_subpaths = NIL; + AppendPathInput startup = {0}; + AppendPathInput total = {0}; + AppendPathInput fractional = {0}; bool startup_neq_total = false; bool fraction_neq_total = false; bool match_partition_order; @@ -2038,16 +2044,23 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, * just a single subpath (and hence aren't doing anything * useful). */ - cheapest_startup = get_singleton_append_subpath(cheapest_startup); - cheapest_total = get_singleton_append_subpath(cheapest_total); + cheapest_startup = + get_singleton_append_subpath(cheapest_startup, + &startup.child_append_relid_sets); + cheapest_total = + get_singleton_append_subpath(cheapest_total, + &total.child_append_relid_sets); - startup_subpaths = lappend(startup_subpaths, cheapest_startup); - total_subpaths = lappend(total_subpaths, cheapest_total); + startup.subpaths = lappend(startup.subpaths, cheapest_startup); + total.subpaths = lappend(total.subpaths, cheapest_total); if (cheapest_fractional) { - cheapest_fractional = get_singleton_append_subpath(cheapest_fractional); - fractional_subpaths = lappend(fractional_subpaths, cheapest_fractional); + cheapest_fractional = + get_singleton_append_subpath(cheapest_fractional, + &fractional.child_append_relid_sets); + fractional.subpaths = + lappend(fractional.subpaths, cheapest_fractional); } } else @@ -2057,13 +2070,16 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, * child paths for the MergeAppend. 
*/ accumulate_append_subpath(cheapest_startup, - &startup_subpaths, NULL); + &startup.subpaths, NULL, + &startup.child_append_relid_sets); accumulate_append_subpath(cheapest_total, - &total_subpaths, NULL); + &total.subpaths, NULL, + &total.child_append_relid_sets); if (cheapest_fractional) accumulate_append_subpath(cheapest_fractional, - &fractional_subpaths, NULL); + &fractional.subpaths, NULL, + &fractional.child_append_relid_sets); } } @@ -2073,8 +2089,7 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, /* We only need Append */ add_path(rel, (Path *) create_append_path(root, rel, - startup_subpaths, - NIL, + startup, pathkeys, NULL, 0, @@ -2083,19 +2098,17 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, if (startup_neq_total) add_path(rel, (Path *) create_append_path(root, rel, - total_subpaths, - NIL, + total, pathkeys, NULL, 0, false, -1)); - if (fractional_subpaths && fraction_neq_total) + if (fractional.subpaths && fraction_neq_total) add_path(rel, (Path *) create_append_path(root, rel, - fractional_subpaths, - NIL, + fractional, pathkeys, NULL, 0, @@ -2107,20 +2120,23 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, /* We need MergeAppend */ add_path(rel, (Path *) create_merge_append_path(root, rel, - startup_subpaths, + startup.subpaths, + startup.child_append_relid_sets, pathkeys, NULL)); if (startup_neq_total) add_path(rel, (Path *) create_merge_append_path(root, rel, - total_subpaths, + total.subpaths, + total.child_append_relid_sets, pathkeys, NULL)); - if (fractional_subpaths && fraction_neq_total) + if (fractional.subpaths && fraction_neq_total) add_path(rel, (Path *) create_merge_append_path(root, rel, - fractional_subpaths, + fractional.subpaths, + fractional.child_append_relid_sets, pathkeys, NULL)); } @@ -2223,7 +2239,8 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, * paths). 
*/ static void -accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths) +accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths, + List **child_append_relid_sets) { if (IsA(path, AppendPath)) { @@ -2232,6 +2249,11 @@ accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths) if (!apath->path.parallel_aware || apath->first_partial_path == 0) { *subpaths = list_concat(*subpaths, apath->subpaths); + *child_append_relid_sets = + lappend(*child_append_relid_sets, path->parent->relids); + *child_append_relid_sets = + list_concat(*child_append_relid_sets, + apath->child_append_relid_sets); return; } else if (special_subpaths != NULL) @@ -2246,6 +2268,11 @@ accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths) apath->first_partial_path); *special_subpaths = list_concat(*special_subpaths, new_special_subpaths); + *child_append_relid_sets = + lappend(*child_append_relid_sets, path->parent->relids); + *child_append_relid_sets = + list_concat(*child_append_relid_sets, + apath->child_append_relid_sets); return; } } @@ -2254,6 +2281,11 @@ accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths) MergeAppendPath *mpath = (MergeAppendPath *) path; *subpaths = list_concat(*subpaths, mpath->subpaths); + *child_append_relid_sets = + lappend(*child_append_relid_sets, path->parent->relids); + *child_append_relid_sets = + list_concat(*child_append_relid_sets, + mpath->child_append_relid_sets); return; } @@ -2265,10 +2297,15 @@ accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths) * Returns the single subpath of an Append/MergeAppend, or just * return 'path' if it's not a single sub-path Append/MergeAppend. * + * As a side effect, whenever we return a single subpath rather than the + * original path, add the relid sets for the original path to + * child_append_relid_sets, so that those relids don't entirely disappear + * from the final plan. + * * Note: 'path' must not be a parallel-aware path. 
*/ static Path * -get_singleton_append_subpath(Path *path) +get_singleton_append_subpath(Path *path, List **child_append_relid_sets) { Assert(!path->parallel_aware); @@ -2277,14 +2314,28 @@ get_singleton_append_subpath(Path *path) AppendPath *apath = (AppendPath *) path; if (list_length(apath->subpaths) == 1) + { + *child_append_relid_sets = + lappend(*child_append_relid_sets, path->parent->relids); + *child_append_relid_sets = + list_concat(*child_append_relid_sets, + apath->child_append_relid_sets); return (Path *) linitial(apath->subpaths); + } } else if (IsA(path, MergeAppendPath)) { MergeAppendPath *mpath = (MergeAppendPath *) path; if (list_length(mpath->subpaths) == 1) + { + *child_append_relid_sets = + lappend(*child_append_relid_sets, path->parent->relids); + *child_append_relid_sets = + list_concat(*child_append_relid_sets, + mpath->child_append_relid_sets); return (Path *) linitial(mpath->subpaths); + } } return path; @@ -2304,6 +2355,8 @@ get_singleton_append_subpath(Path *path) static void set_dummy_rel_pathlist(RelOptInfo *rel) { + AppendPathInput in = {0}; + /* Set dummy size estimates --- we leave attr_widths[] as zeroes */ rel->rows = 0; rel->reltarget->width = 0; @@ -2313,7 +2366,7 @@ set_dummy_rel_pathlist(RelOptInfo *rel) rel->partial_pathlist = NIL; /* Set up the dummy path */ - add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL, + add_path(rel, (Path *) create_append_path(NULL, rel, in, NIL, rel->lateral_relids, 0, false, -1)); diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 2615651c073..443e2dca7c0 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -1513,6 +1513,7 @@ void mark_dummy_rel(RelOptInfo *rel) { MemoryContext oldcontext; + AppendPathInput in = {0}; /* Already marked? */ if (is_dummy_rel(rel)) @@ -1529,7 +1530,7 @@ mark_dummy_rel(RelOptInfo *rel) rel->partial_pathlist = NIL; /* Set up the dummy path */ - add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL, + add_path(rel, (Path *) create_append_path(NULL, rel, in, NIL, rel->lateral_relids, 0, false, -1)); diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index a50260290fa..959df43c39e 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -1263,6 +1263,7 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags) plan->plan.lefttree = NULL; plan->plan.righttree = NULL; plan->apprelids = rel->relids; + plan->child_append_relid_sets = best_path->child_append_relid_sets; if (pathkeys != NIL) { @@ -1475,6 +1476,7 @@ create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path, plan->lefttree = NULL; plan->righttree = NULL; node->apprelids = rel->relids; + node->child_append_relid_sets = best_path->child_append_relid_sets; /* * Compute sort column info, and adjust MergeAppend's tlist as needed. diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index f68142cfcb8..006b3281969 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -4063,7 +4063,7 @@ create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, * might get between 0 and N output rows. Offhand I think that's * desired.) 
*/ - List *paths = NIL; + AppendPathInput append = {0}; while (--nrows >= 0) { @@ -4071,13 +4071,12 @@ create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, create_group_result_path(root, grouped_rel, grouped_rel->reltarget, (List *) parse->havingQual); - paths = lappend(paths, path); + append.subpaths = lappend(append.subpaths, path); } path = (Path *) create_append_path(root, grouped_rel, - paths, - NIL, + append, NIL, NULL, 0, diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index 78c95c36dd5..f50c296e3d9 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -696,9 +696,9 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, ListCell *lc; ListCell *lc2; ListCell *lc3; - List *cheapest_pathlist = NIL; - List *ordered_pathlist = NIL; - List *partial_pathlist = NIL; + AppendPathInput cheapest = {0}; + AppendPathInput ordered = {0}; + AppendPathInput partial = {0}; bool partial_paths_valid = true; bool consider_parallel = true; List *rellist; @@ -783,7 +783,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, if (is_dummy_rel(rel)) continue; - cheapest_pathlist = lappend(cheapest_pathlist, + cheapest.subpaths = lappend(cheapest.subpaths, rel->cheapest_total_path); if (try_sorted) @@ -795,7 +795,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, false); if (ordered_path != NULL) - ordered_pathlist = lappend(ordered_pathlist, ordered_path); + ordered.subpaths = lappend(ordered.subpaths, ordered_path); else { /* @@ -818,20 +818,20 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, else if (rel->partial_pathlist == NIL) partial_paths_valid = false; else - partial_pathlist = lappend(partial_pathlist, - linitial(rel->partial_pathlist)); + partial.partial_subpaths = lappend(partial.partial_subpaths, + linitial(rel->partial_pathlist)); } } /* Build result relation. */ result_rel = fetch_upper_rel(root, UPPERREL_SETOP, relids); result_rel->reltarget = create_setop_pathtarget(root, tlist, - cheapest_pathlist); + cheapest.subpaths); result_rel->consider_parallel = consider_parallel; result_rel->consider_startup = (root->tuple_fraction > 0); /* If all UNION children were dummy rels, make the resulting rel dummy */ - if (cheapest_pathlist == NIL) + if (cheapest.subpaths == NIL) { mark_dummy_rel(result_rel); @@ -842,8 +842,8 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, * Append the child results together using the cheapest paths from each * union child. */ - apath = (Path *) create_append_path(root, result_rel, cheapest_pathlist, - NIL, NIL, NULL, 0, false, -1); + apath = (Path *) create_append_path(root, result_rel, cheapest, + NIL, NULL, 0, false, -1); /* * Estimate number of groups. For now we just assume the output is unique @@ -862,7 +862,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, int parallel_workers = 0; /* Find the highest number of workers requested for any subpath. 
*/ - foreach(lc, partial_pathlist) + foreach(lc, partial.partial_subpaths) { Path *subpath = lfirst(lc); @@ -881,14 +881,14 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, if (enable_parallel_append) { parallel_workers = Max(parallel_workers, - pg_leftmost_one_pos32(list_length(partial_pathlist)) + 1); + pg_leftmost_one_pos32(list_length(partial.partial_subpaths)) + 1); parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); } Assert(parallel_workers > 0); papath = (Path *) - create_append_path(root, result_rel, NIL, partial_pathlist, + create_append_path(root, result_rel, partial, NIL, NULL, parallel_workers, enable_parallel_append, -1); gpath = (Path *) @@ -901,7 +901,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, double dNumGroups; bool can_sort = grouping_is_sortable(groupList); bool can_hash = grouping_is_hashable(groupList); - Path *first_path = linitial(cheapest_pathlist); + Path *first_path = linitial(cheapest.subpaths); /* * Estimate the number of UNION output rows. In the case when only a @@ -911,7 +911,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, * contain Vars with varno==0, which estimate_num_groups() wouldn't * like. */ - if (list_length(cheapest_pathlist) == 1 && + if (list_length(cheapest.subpaths) == 1 && first_path->parent->reloptkind != RELOPT_UPPER_REL) { dNumGroups = estimate_num_groups(root, @@ -1017,7 +1017,8 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, path = (Path *) create_merge_append_path(root, result_rel, - ordered_pathlist, + ordered.subpaths, + NIL, union_pathkeys, NULL); @@ -1216,6 +1217,9 @@ generate_nonunion_paths(SetOperationStmt *op, PlannerInfo *root, if (op->all) { Path *apath; + AppendPathInput append = {0}; + + append.subpaths = list_make1(lpath); /* * EXCEPT ALL: If the right-hand input is dummy then we can @@ -1224,8 +1228,9 @@ generate_nonunion_paths(SetOperationStmt *op, PlannerInfo *root, * between the set op targetlist and the targetlist of the * left input. The Append will be removed in setrefs.c. */ - apath = (Path *) create_append_path(root, result_rel, list_make1(lpath), - NIL, NIL, NULL, 0, false, -1); + apath = (Path *) create_append_path(root, result_rel, + append, NIL, NULL, 0, + false, -1); add_path(result_rel, apath); diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 7b6c5d51e5d..9678c20ff1f 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -1298,7 +1298,7 @@ create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel, AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, - List *subpaths, List *partial_subpaths, + AppendPathInput input, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows) @@ -1308,6 +1308,7 @@ create_append_path(PlannerInfo *root, Assert(!parallel_aware || parallel_workers > 0); + pathnode->child_append_relid_sets = input.child_append_relid_sets; pathnode->path.pathtype = T_Append; pathnode->path.parent = rel; pathnode->path.pathtarget = rel->reltarget; @@ -1323,7 +1324,7 @@ create_append_path(PlannerInfo *root, * on the simpler get_appendrel_parampathinfo. There's no point in doing * the more expensive thing for a dummy path, either. 
*/ - if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL) + if (rel->reloptkind == RELOPT_BASEREL && root && input.subpaths != NIL) pathnode->path.param_info = get_baserel_parampathinfo(root, rel, required_outer); @@ -1354,11 +1355,11 @@ create_append_path(PlannerInfo *root, */ Assert(pathkeys == NIL); - list_sort(subpaths, append_total_cost_compare); - list_sort(partial_subpaths, append_startup_cost_compare); + list_sort(input.subpaths, append_total_cost_compare); + list_sort(input.partial_subpaths, append_startup_cost_compare); } - pathnode->first_partial_path = list_length(subpaths); - pathnode->subpaths = list_concat(subpaths, partial_subpaths); + pathnode->first_partial_path = list_length(input.subpaths); + pathnode->subpaths = list_concat(input.subpaths, input.partial_subpaths); /* * Apply query-wide LIMIT if known and path is for sole base relation. @@ -1470,6 +1471,7 @@ MergeAppendPath * create_merge_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, + List *child_append_relid_sets, List *pathkeys, Relids required_outer) { @@ -1485,6 +1487,7 @@ create_merge_append_path(PlannerInfo *root, */ Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer)); + pathnode->child_append_relid_sets = child_append_relid_sets; pathnode->path.pathtype = T_MergeAppend; pathnode->path.parent = rel; pathnode->path.pathtarget = rel->reltarget; @@ -3932,11 +3935,12 @@ reparameterize_path(PlannerInfo *root, Path *path, case T_Append: { AppendPath *apath = (AppendPath *) path; - List *childpaths = NIL; - List *partialpaths = NIL; + AppendPathInput new_append = {0}; int i; ListCell *lc; + new_append.child_append_relid_sets = apath->child_append_relid_sets; + /* Reparameterize the children */ i = 0; foreach(lc, apath->subpaths) @@ -3950,13 +3954,13 @@ reparameterize_path(PlannerInfo *root, Path *path, return NULL; /* We have to re-split the regular and partial paths */ if (i < apath->first_partial_path) - childpaths = lappend(childpaths, spath); + new_append.subpaths = lappend(new_append.subpaths, spath); else - partialpaths = lappend(partialpaths, spath); + new_append.partial_subpaths = lappend(new_append.partial_subpaths, spath); i++; } return (Path *) - create_append_path(root, rel, childpaths, partialpaths, + create_append_path(root, rel, new_append, apath->path.pathkeys, required_outer, apath->path.parallel_workers, apath->path.parallel_aware, diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index 9cc5d2e7411..c175ee95b68 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -2250,6 +2250,12 @@ typedef struct CustomPath * For partial Append, 'subpaths' contains non-partial subpaths followed by * partial subpaths. * + * Whenever accumulate_append_subpath() allows us to consolidate multiple + * levels of Append paths down to one, we store the RTI sets for the omitted + * paths in child_append_relid_sets. This is not necessary for planning or + * execution; we do it for the benefit of code that wants to inspect the + * final plan and understand how it came to be. + * * Note: it is possible for "subpaths" to contain only one, or even no, * elements. These cases are optimized during create_append_plan. 
* In particular, an AppendPath with no subpaths is a "dummy" path that @@ -2265,6 +2271,7 @@ typedef struct AppendPath /* Index of first partial path in subpaths; list_length(subpaths) if none */ int first_partial_path; Cardinality limit_tuples; /* hard limit on output tuples, or -1 */ + List *child_append_relid_sets; } AppendPath; #define IS_DUMMY_APPEND(p) \ @@ -2281,12 +2288,15 @@ extern bool is_dummy_rel(RelOptInfo *rel); /* * MergeAppendPath represents a MergeAppend plan, ie, the merging of sorted * results from several member plans to produce similarly-sorted output. + * + * child_append_relid_sets has the same meaning here as for AppendPath. */ typedef struct MergeAppendPath { Path path; List *subpaths; /* list of component Paths */ Cardinality limit_tuples; /* hard limit on output tuples, or -1 */ + List *child_append_relid_sets; } MergeAppendPath; /* diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 0ad0ff404c9..485bec5aabd 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -394,9 +394,16 @@ struct PartitionPruneInfo; /* forward reference to struct below */ typedef struct Append { Plan plan; + /* RTIs of appendrel(s) formed by this node */ Bitmapset *apprelids; + + /* sets of RTIs of appendrels consolidated into this node */ + List *child_append_relid_sets; + + /* plans to run */ List *appendplans; + /* # of asynchronous plans */ int nasyncplans; @@ -426,6 +433,10 @@ typedef struct MergeAppend /* RTIs of appendrel(s) formed by this node */ Bitmapset *apprelids; + /* sets of RTIs of appendrels consolidated into this node */ + List *child_append_relid_sets; + + /* plans to run */ List *mergeplans; /* these fields are just like the sort-key info in struct Sort: */ diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h index 224750859c3..cf8a654fa53 100644 --- a/src/include/optimizer/pathnode.h +++ b/src/include/optimizer/pathnode.h @@ -17,6 +17,20 @@ #include "nodes/bitmapset.h" #include "nodes/pathnodes.h" +/* + * Everything in subpaths or partial_subpaths will become part of the + * Append node's subpaths list. Partial and non-partial subpaths can be + * mixed in the same Append node only if it is parallel-aware. + * + * See the comments for AppendPath for the meaning and purpose of the + * child_append_relid_sets field. 
+ */ +typedef struct AppendPathInput +{ + List *subpaths; + List *partial_subpaths; + List *child_append_relid_sets; +} AppendPathInput; /* Hook for plugins to get control during joinrel setup */ typedef void (*joinrel_setup_hook_type) (PlannerInfo *root, @@ -78,14 +92,16 @@ extern TidRangePath *create_tidrangescan_path(PlannerInfo *root, List *tidrangequals, Relids required_outer, int parallel_workers); + extern AppendPath *create_append_path(PlannerInfo *root, RelOptInfo *rel, - List *subpaths, List *partial_subpaths, + AppendPathInput input, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows); extern MergeAppendPath *create_merge_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, + List *child_append_relid_sets, List *pathkeys, Relids required_outer); extern GroupResultPath *create_group_result_path(PlannerInfo *root, diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index a942d030d2f..39c76691c86 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -125,6 +125,7 @@ AnlIndexData AnyArrayType Append AppendPath +AppendPathInput AppendRelInfo AppendState ApplyErrorCallbackArg
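
The commit message's stated use case -- reconstructing partitionwise-join
decisions from a finished plan -- is not demonstrated in the patch itself.
What follows is a minimal, hypothetical sketch (not part of the patch) of how
an extension might gather the relevant RTI sets from a plan tree using the
new field. The function name is illustrative, and the traversal only follows
lefttree/righttree plus the append child lists; a real extension would also
need to descend into subplans, SubqueryScan nodes, and so on.

#include "postgres.h"
#include "nodes/plannodes.h"

/*
 * Hypothetical helper: collect every RTI set that identifies an appendrel,
 * i.e. the apprelids of each surviving Append/MergeAppend plus the relid
 * sets of any child appends that were consolidated into it.
 */
static void
collect_append_relid_sets(Plan *plan, List **relid_sets)
{
    ListCell   *lc;

    if (plan == NULL)
        return;

    if (IsA(plan, Append))
    {
        Append     *aplan = (Append *) plan;

        /* RTIs of the appendrel(s) this node was built for ... */
        *relid_sets = lappend(*relid_sets, aplan->apprelids);
        /* ... plus those of any appendrels whose paths were pulled up */
        foreach(lc, aplan->child_append_relid_sets)
            *relid_sets = lappend(*relid_sets, lfirst(lc));

        foreach(lc, aplan->appendplans)
            collect_append_relid_sets((Plan *) lfirst(lc), relid_sets);
    }
    else if (IsA(plan, MergeAppend))
    {
        MergeAppend *mplan = (MergeAppend *) plan;

        *relid_sets = lappend(*relid_sets, mplan->apprelids);
        foreach(lc, mplan->child_append_relid_sets)
            *relid_sets = lappend(*relid_sets, lfirst(lc));

        foreach(lc, mplan->mergeplans)
            collect_append_relid_sets((Plan *) lfirst(lc), relid_sets);
    }
    else
    {
        collect_append_relid_sets(plan->lefttree, relid_sets);
        collect_append_relid_sets(plan->righttree, relid_sets);
    }
}

Separately, note that the patch changes the signature of create_append_path():
the former subpaths/partial_subpaths arguments are now bundled into an
AppendPathInput. A call site outside core would be adapted roughly as below
(variable names other than the API's are illustrative):

    AppendPathInput in = {0};

    in.subpaths = subpaths;                 /* formerly a direct argument */
    in.partial_subpaths = partial_subpaths; /* likewise */
    /* in.child_append_relid_sets can stay NIL unless consolidating */

    apath = create_append_path(root, rel, in,
                               pathkeys, required_outer,
                               parallel_workers, parallel_aware,
                               rows);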