Commit ff5466a

Merge branch 'REL9_6_STABLE' into PGPRO9_6
2 parents f59efd0 + 8a70d8a, commit ff5466a

14 files changed, +263 / -96 lines

doc/src/sgml/ref/pg_xlogdump.sgml

Lines changed: 6 additions & 3 deletions
@@ -117,9 +117,12 @@ doc/src/sgml/ref/pg_xlogdump.sgml
       <term><option>--path=<replaceable>path</replaceable></option></term>
       <listitem>
        <para>
-        Directory in which to find log segment files. The default is to search
-        for them in the <literal>pg_xlog</literal> subdirectory of the current
-        directory.
+        Specifies a directory to search for log segment files or a
+        directory with a <literal>pg_xlog</literal> subdirectory that
+        contains such files.  The default is to search in the current
+        directory, the <literal>pg_xlog</literal> subdirectory of the
+        current directory, and the <literal>pg_xlog</literal> subdirectory
+        of <envar>PGDATA</envar>.
        </para>
       </listitem>
      </varlistentry>
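
As a usage note (not part of the commit; the segment name is chosen only for illustration): with this search order, an invocation like "pg_xlogdump 000000010000000000000001" run from inside a data directory should find the segment in its pg_xlog subdirectory without an explicit --path, and the option remains useful when the segment files have been copied somewhere else.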

src/backend/access/brin/brin_pageops.c

Lines changed: 1 addition & 1 deletion
@@ -289,7 +289,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange,
 		XLogRegisterBufData(0, (char *) newtup, newsz);
 
 		/* revmap page */
-		XLogRegisterBuffer(1, revmapbuf, REGBUF_STANDARD);
+		XLogRegisterBuffer(1, revmapbuf, 0);
 
 		/* old page */
 		XLogRegisterBuffer(2, oldbuf, REGBUF_STANDARD);

src/backend/access/common/tupconvert.c

Lines changed: 65 additions & 45 deletions
@@ -206,55 +206,12 @@ convert_tuples_by_name(TupleDesc indesc,
 {
 	TupleConversionMap *map;
 	AttrNumber *attrMap;
-	int			n;
+	int			n = outdesc->natts;
 	int			i;
 	bool		same;
 
 	/* Verify compatibility and prepare attribute-number map */
-	n = outdesc->natts;
-	attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber));
-	for (i = 0; i < n; i++)
-	{
-		Form_pg_attribute att = outdesc->attrs[i];
-		char	   *attname;
-		Oid			atttypid;
-		int32		atttypmod;
-		int			j;
-
-		if (att->attisdropped)
-			continue;			/* attrMap[i] is already 0 */
-		attname = NameStr(att->attname);
-		atttypid = att->atttypid;
-		atttypmod = att->atttypmod;
-		for (j = 0; j < indesc->natts; j++)
-		{
-			att = indesc->attrs[j];
-			if (att->attisdropped)
-				continue;
-			if (strcmp(attname, NameStr(att->attname)) == 0)
-			{
-				/* Found it, check type */
-				if (atttypid != att->atttypid || atttypmod != att->atttypmod)
-					ereport(ERROR,
-							(errcode(ERRCODE_DATATYPE_MISMATCH),
-							 errmsg_internal("%s", _(msg)),
-							 errdetail("Attribute \"%s\" of type %s does not match corresponding attribute of type %s.",
-									   attname,
-									   format_type_be(outdesc->tdtypeid),
-									   format_type_be(indesc->tdtypeid))));
-				attrMap[i] = (AttrNumber) (j + 1);
-				break;
-			}
-		}
-		if (attrMap[i] == 0)
-			ereport(ERROR,
-					(errcode(ERRCODE_DATATYPE_MISMATCH),
-					 errmsg_internal("%s", _(msg)),
-					 errdetail("Attribute \"%s\" of type %s does not exist in type %s.",
-							   attname,
-							   format_type_be(outdesc->tdtypeid),
-							   format_type_be(indesc->tdtypeid))));
-	}
+	attrMap = convert_tuples_by_name_map(indesc, outdesc, msg);
 
 	/*
 	 * Check to see if the map is one-to-one and the tuple types are the same.
@@ -312,6 +269,69 @@ convert_tuples_by_name(TupleDesc indesc,
 	return map;
 }
 
+/*
+ * Return a palloc'd bare attribute map for tuple conversion, matching input
+ * and output columns by name.  (Dropped columns are ignored in both input and
+ * output.)  This is normally a subroutine for convert_tuples_by_name, but can
+ * be used standalone.
+ */
+AttrNumber *
+convert_tuples_by_name_map(TupleDesc indesc,
+						   TupleDesc outdesc,
+						   const char *msg)
+{
+	AttrNumber *attrMap;
+	int			n;
+	int			i;
+
+	n = outdesc->natts;
+	attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber));
+	for (i = 0; i < n; i++)
+	{
+		Form_pg_attribute att = outdesc->attrs[i];
+		char	   *attname;
+		Oid			atttypid;
+		int32		atttypmod;
+		int			j;
+
+		if (att->attisdropped)
+			continue;			/* attrMap[i] is already 0 */
+		attname = NameStr(att->attname);
+		atttypid = att->atttypid;
+		atttypmod = att->atttypmod;
+		for (j = 0; j < indesc->natts; j++)
+		{
+			att = indesc->attrs[j];
+			if (att->attisdropped)
+				continue;
+			if (strcmp(attname, NameStr(att->attname)) == 0)
+			{
+				/* Found it, check type */
+				if (atttypid != att->atttypid || atttypmod != att->atttypmod)
+					ereport(ERROR,
+							(errcode(ERRCODE_DATATYPE_MISMATCH),
+							 errmsg_internal("%s", _(msg)),
+							 errdetail("Attribute \"%s\" of type %s does not match corresponding attribute of type %s.",
+									   attname,
+									   format_type_be(outdesc->tdtypeid),
+									   format_type_be(indesc->tdtypeid))));
+				attrMap[i] = (AttrNumber) (j + 1);
+				break;
+			}
+		}
+		if (attrMap[i] == 0)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg_internal("%s", _(msg)),
+					 errdetail("Attribute \"%s\" of type %s does not exist in type %s.",
+							   attname,
+							   format_type_be(outdesc->tdtypeid),
+							   format_type_be(indesc->tdtypeid))));
+	}
+
+	return attrMap;
+}
+
 /*
  * Perform conversion of a tuple according to the map.
  */
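
The "used standalone" case mentioned in the new header comment appears later in this same commit: tablecmds.c below calls convert_tuples_by_name_map directly to build a per-child attribute map.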

src/backend/commands/tablecmds.c

Lines changed: 62 additions & 4 deletions
@@ -20,6 +20,7 @@
 #include "access/reloptions.h"
 #include "access/relscan.h"
 #include "access/sysattr.h"
+#include "access/tupconvert.h"
 #include "access/xact.h"
 #include "access/xlog.h"
 #include "catalog/catalog.h"
@@ -8030,12 +8031,69 @@ ATPrepAlterColumnType(List **wqueue,
 	ReleaseSysCache(tuple);
 
 	/*
-	 * The recursion case is handled by ATSimpleRecursion.  However, if we are
-	 * told not to recurse, there had better not be any child tables; else the
-	 * alter would put them out of step.
+	 * Recurse manually by queueing a new command for each child, if
+	 * necessary.  We cannot apply ATSimpleRecursion here because we need to
+	 * remap attribute numbers in the USING expression, if any.
+	 *
+	 * If we are told not to recurse, there had better not be any child
+	 * tables; else the alter would put them out of step.
 	 */
 	if (recurse)
-		ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode);
+	{
+		Oid			relid = RelationGetRelid(rel);
+		ListCell   *child;
+		List	   *children;
+
+		children = find_all_inheritors(relid, lockmode, NULL);
+
+		/*
+		 * find_all_inheritors does the recursive search of the inheritance
+		 * hierarchy, so all we have to do is process all of the relids in the
+		 * list that it returns.
+		 */
+		foreach(child, children)
+		{
+			Oid			childrelid = lfirst_oid(child);
+			Relation	childrel;
+
+			if (childrelid == relid)
+				continue;
+
+			/* find_all_inheritors already got lock */
+			childrel = relation_open(childrelid, NoLock);
+			CheckTableNotInUse(childrel, "ALTER TABLE");
+
+			/*
+			 * Remap the attribute numbers.  If no USING expression was
+			 * specified, there is no need for this step.
+			 */
+			if (def->cooked_default)
+			{
+				AttrNumber *attmap;
+				bool		found_whole_row;
+
+				/* create a copy to scribble on */
+				cmd = copyObject(cmd);
+
+				attmap = convert_tuples_by_name_map(RelationGetDescr(childrel),
+													RelationGetDescr(rel),
+								 gettext_noop("could not convert row type"));
+				((ColumnDef *) cmd->def)->cooked_default =
+					map_variable_attnos(def->cooked_default,
+										1, 0,
+										attmap, RelationGetDescr(rel)->natts,
+										&found_whole_row);
+				if (found_whole_row)
+					ereport(ERROR,
+							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+							 errmsg("cannot convert whole-row table reference"),
+							 errdetail("USING expression contains a whole-row table reference.")));
+				pfree(attmap);
+			}
+			ATPrepCmd(wqueue, childrel, cmd, false, true, lockmode);
+			relation_close(childrel, NoLock);
+		}
+	}
 	else if (!recursing &&
 			 find_inheritance_children(RelationGetRelid(rel), NoLock) != NIL)
 		ereport(ERROR,
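
Summary of the change (our reading, not commit text): ALTER TABLE ... ALTER COLUMN ... TYPE on an inheritance parent now queues a per-child command itself instead of going through ATSimpleRecursion, so that a USING expression written against the parent's column numbering can be rewritten via convert_tuples_by_name_map and map_variable_attnos to match each child's attribute numbers, which can differ when a child has extra or dropped columns; a USING expression containing a whole-row reference is rejected with the new error.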

src/backend/executor/functions.c

Lines changed: 10 additions & 1 deletion
@@ -500,7 +500,16 @@ init_execution_state(List *queryTree_list,
 									fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
 									NULL);
 
-		/* Precheck all commands for validity in a function */
+		/*
+		 * Precheck all commands for validity in a function.  This should
+		 * generally match the restrictions spi.c applies.
+		 */
+		if (IsA(stmt, CopyStmt) &&
+			((CopyStmt *) stmt)->filename == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("cannot COPY to/from client in a SQL function")));
+
 		if (IsA(stmt, TransactionStmt))
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
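
In effect (our reading of the new check): a CopyStmt with a NULL filename is a client-side COPY (COPY ... FROM STDIN / TO STDOUT), so a SQL-language function containing one now fails up front with "cannot COPY to/from client in a SQL function", matching the restriction spi.c already applies; COPY to or from a server-side file is unaffected.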

src/backend/optimizer/path/costsize.c

Lines changed: 48 additions & 26 deletions
@@ -161,6 +161,7 @@ static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
 static double relation_byte_size(double tuples, int width);
 static double page_size(double tuples, int width);
+static double get_parallel_divisor(Path *path);
 
 
 /*
@@ -238,32 +239,7 @@ cost_seqscan(Path *path, PlannerInfo *root,
 	/* Adjust costing for parallelism, if used. */
 	if (path->parallel_workers > 0)
 	{
-		double		parallel_divisor = path->parallel_workers;
-		double		leader_contribution;
-
-		/*
-		 * Early experience with parallel query suggests that when there is
-		 * only one worker, the leader often makes a very substantial
-		 * contribution to executing the parallel portion of the plan, but as
-		 * more workers are added, it does less and less, because it's busy
-		 * reading tuples from the workers and doing whatever non-parallel
-		 * post-processing is needed.  By the time we reach 4 workers, the
-		 * leader no longer makes a meaningful contribution.  Thus, for now,
-		 * estimate that the leader spends 30% of its time servicing each
-		 * worker, and the remainder executing the parallel plan.
-		 */
-		leader_contribution = 1.0 - (0.3 * path->parallel_workers);
-		if (leader_contribution > 0)
-			parallel_divisor += leader_contribution;
-
-		/*
-		 * In the case of a parallel plan, the row count needs to represent
-		 * the number of tuples processed per worker.  Otherwise, higher-level
-		 * plan nodes that appear below the gather will be costed incorrectly,
-		 * because they'll anticipate receiving more rows than any given copy
-		 * will actually get.
-		 */
-		path->rows = clamp_row_est(path->rows / parallel_divisor);
+		double		parallel_divisor = get_parallel_divisor(path);
 
 		/* The CPU cost is divided among all the workers. */
 		cpu_run_cost /= parallel_divisor;
@@ -274,6 +250,12 @@ cost_seqscan(Path *path, PlannerInfo *root,
 		 * prefetching.  For now, we assume that the disk run cost can't be
 		 * amortized at all.
 		 */
+
+		/*
+		 * In the case of a parallel plan, the row count needs to represent
+		 * the number of tuples processed per worker.
+		 */
+		path->rows = clamp_row_est(path->rows / parallel_divisor);
 	}
 
 	path->startup_cost = startup_cost;
@@ -2014,6 +1996,10 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
 	else
 		path->path.rows = path->path.parent->rows;
 
+	/* For partial paths, scale row estimate. */
+	if (path->path.parallel_workers > 0)
+		path->path.rows /= get_parallel_divisor(&path->path);
+
 	/*
 	 * We could include disable_cost in the preliminary estimate, but that
 	 * would amount to optimizing for the case where the join method is
@@ -2432,6 +2418,10 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
 	else
 		path->jpath.path.rows = path->jpath.path.parent->rows;
 
+	/* For partial paths, scale row estimate. */
+	if (path->jpath.path.parallel_workers > 0)
+		path->jpath.path.rows /= get_parallel_divisor(&path->jpath.path);
+
 	/*
 	 * We could include disable_cost in the preliminary estimate, but that
 	 * would amount to optimizing for the case where the join method is
@@ -2811,6 +2801,10 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
 	else
 		path->jpath.path.rows = path->jpath.path.parent->rows;
 
+	/* For partial paths, scale row estimate. */
+	if (path->jpath.path.parallel_workers > 0)
+		path->jpath.path.rows /= get_parallel_divisor(&path->jpath.path);
+
 	/*
 	 * We could include disable_cost in the preliminary estimate, but that
 	 * would amount to optimizing for the case where the join method is
@@ -4799,3 +4793,31 @@ page_size(double tuples, int width)
 {
 	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
 }
+
+/*
+ * Estimate the fraction of the work that each worker will do given the
+ * number of workers budgeted for the path.
+ */
+static double
+get_parallel_divisor(Path *path)
+{
+	double		parallel_divisor = path->parallel_workers;
+	double		leader_contribution;
+
+	/*
+	 * Early experience with parallel query suggests that when there is only
+	 * one worker, the leader often makes a very substantial contribution to
+	 * executing the parallel portion of the plan, but as more workers are
+	 * added, it does less and less, because it's busy reading tuples from the
+	 * workers and doing whatever non-parallel post-processing is needed.  By
+	 * the time we reach 4 workers, the leader no longer makes a meaningful
+	 * contribution.  Thus, for now, estimate that the leader spends 30% of
+	 * its time servicing each worker, and the remainder executing the
+	 * parallel plan.
+	 */
+	leader_contribution = 1.0 - (0.3 * path->parallel_workers);
+	if (leader_contribution > 0)
+		parallel_divisor += leader_contribution;
+
+	return parallel_divisor;
+}
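
To make the new helper concrete, hand-evaluating get_parallel_divisor (leader_contribution = 1.0 - 0.3 * parallel_workers, added to the worker count only while positive) gives:

    1 worker:  1 + 0.7 = 1.7
    2 workers: 2 + 0.4 = 2.4
    3 workers: 3 + 0.1 = 3.1
    4 workers: 4.0 (leader contribution no longer counted)

These are the divisors applied to per-worker row counts and CPU run cost in cost_seqscan and, with this commit, to the row estimates of partial nestloop, mergejoin, and hashjoin paths as well.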

src/bin/pg_dump/pg_dump.c

Lines changed: 2 additions & 2 deletions
@@ -616,7 +616,7 @@ main(int argc, char **argv)
 		|| numWorkers > MAXIMUM_WAIT_OBJECTS
 #endif
 		)
-		exit_horribly(NULL, "%s: invalid number of parallel jobs\n", progname);
+		exit_horribly(NULL, "invalid number of parallel jobs\n");
 
 	/* Parallel backup only in the directory archive format so far */
 	if (archiveFormat != archDirectory && numWorkers > 1)
@@ -1219,7 +1219,7 @@ expand_schema_name_patterns(Archive *fout,
 
 		res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
 		if (strict_names && PQntuples(res) == 0)
-			exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
+			exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
 
 		for (i = 0; i < PQntuples(res); i++)
 		{
