break
results.append(res)
res = []
- elif r.status == PIPELINE_SYNC:
- assert not res
- results.append([r])
else:
- res.append(r)
+ status = r.status
+ if status == PIPELINE_SYNC:
+ assert not res
+ results.append([r])
+ elif status == COPY_IN or status == COPY_OUT or status == COPY_BOTH:
+ # This shouldn't happen, but insisting hard enough, it will.
+ # For instance, in test_executemany_badquery(), with the COPY
+ # statement and the AsyncClientCursor (which disables
+ # prepared statements).
+ # Bail out from the resulting infinite loop.
+ raise e.NotSupportedError(
+ "COPY cannot be used in pipeline mode"
+ )
+ else:
+ res.append(r)
if ready & READY_W:
pgconn.flush()
if status == libpq.PGRES_PIPELINE_SYNC:
results.append([r])
break
+ elif (
+ status == libpq.PGRES_COPY_IN
+ or status == libpq.PGRES_COPY_OUT
+ or status == libpq.PGRES_COPY_BOTH
+ ):
+ # This shouldn't happen, but insisting hard enough, it will.
+ # For instance, in test_executemany_badquery(), with the COPY
+ # statement and the AsyncClientCursor (which disables
+ # prepared statements).
+ # Bail out from the resulting infinite loop.
+ raise e.NotSupportedError(
+ "COPY cannot be used in pipeline mode"
+ )
else:
res.append(r)
"query",
[
"insert into nosuchtable values (%s, %s)",
- # This fails, but only because we try to copy in pipeline mode,
- # crashing the connection. Which would be even fine, but with
- # the async cursor it's worse... See test_client_cursor_async.py.
- # "copy (select %s, %s) to stdout",
+ "copy (select %s, %s) to stdout",
"wat (%s, %s)",
],
)
"query",
[
"insert into nosuchtable values (%s, %s)",
- # This fails because we end up trying to copy in pipeline mode.
- # However, sometimes (and pretty regularly if we enable pgconn.trace())
- # something goes in a loop and only terminates by OOM. Strace shows
- # an allocation loop. I think it's in the libpq.
- # "copy (select %s, %s) to stdout",
+ "copy (select %s, %s) to stdout",
"wat (%s, %s)",
],
)