def readall(self):
    """Read until EOF, using multiple read() calls.

    Returns the accumulated data as bytes; self.read() is expected to
    return b'' (or another falsy value) at EOF.
    """
    res = bytearray()
    # Walrus loop: keep reading fixed-size chunks until read() returns
    # a falsy value (b'' at EOF, or None for a non-blocking stream).
    # NOTE(review): DEFAULT_BUFFER_SIZE is a module-level constant
    # defined elsewhere in this file.
    while data := self.read(DEFAULT_BUFFER_SIZE):
        res += data
    if res:
        return bytes(res)
def encode(input, output):
    """Encode a file; input and output are binary files.

    Reads *input* in MAXBINSIZE-byte groups and writes each group to
    *output* as one base64-encoded line (b2a_base64 appends the newline).
    NOTE(review): MAXBINSIZE and binascii come from this module's top
    level, defined outside this block.
    """
    while s := input.read(MAXBINSIZE):
        # A short read does not mean EOF (e.g. pipes); keep topping the
        # group up to MAXBINSIZE bytes until read() returns nothing.
        while len(s) < MAXBINSIZE and (ns := input.read(MAXBINSIZE-len(s))):
            s += ns
        line = binascii.b2a_base64(s)
        output.write(line)
def decode(input, output):
    """Decode a file; input and output are binary files.

    Reads *input* line by line until readline() returns b'' (EOF) and
    writes the base64-decoded payload of each line to *output*.
    """
    while line := input.readline():
        s = binascii.a2b_base64(line)
        output.write(s)
p = Popen(["/usr/bin/dump", f"-X{AIX_ABI}", "-H", file],
universal_newlines=True, stdout=PIPE, stderr=DEVNULL)
# be sure to read to the end-of-file - getting all entries
- while True:
- ld_header = get_ld_header(p)
- if ld_header:
- ldr_headers.append((ld_header, get_ld_header_info(p)))
- else:
- break
+ while ld_header := get_ld_header(p):
+ ldr_headers.append((ld_header, get_ld_header_info(p)))
p.stdout.close()
p.wait()
return ldr_headers
feedparser = FeedParser(self._class, policy=self.policy)
if headersonly:
feedparser._set_headersonly()
- while True:
- data = fp.read(8192)
- if not data:
- break
+ while data := fp.read(8192):
feedparser.feed(data)
return feedparser.close()
"""
self.voidcmd('TYPE I')
with self.transfercmd(cmd, rest) as conn:
- while 1:
- data = conn.recv(blocksize)
- if not data:
- break
+ while data := conn.recv(blocksize):
callback(data)
# shutdown ssl layer
if _SSLSocket is not None and isinstance(conn, _SSLSocket):
"""
self.voidcmd('TYPE I')
with self.transfercmd(cmd, rest) as conn:
- while 1:
- buf = fp.read(blocksize)
- if not buf:
- break
+ while buf := fp.read(blocksize):
conn.sendall(buf)
if callback:
callback(buf)
assert self.chunked != _UNKNOWN
value = []
try:
- while True:
- chunk_left = self._get_chunk_left()
- if chunk_left is None:
- break
-
+ while (chunk_left := self._get_chunk_left()) is not None:
if amt is not None and amt <= chunk_left:
value.append(self._safe_read(amt))
self.chunk_left = chunk_left - amt
encode = self._is_textIO(data)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
- while 1:
- datablock = data.read(self.blocksize)
- if not datablock:
- break
+ while datablock := data.read(self.blocksize):
if encode:
datablock = datablock.encode("iso-8859-1")
sys.audit("http.client.send", self, datablock)
encode = self._is_textIO(readable)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
- while True:
- datablock = readable.read(self.blocksize)
- if not datablock:
- break
+ while datablock := readable.read(self.blocksize):
if encode:
datablock = datablock.encode("iso-8859-1")
yield datablock
"comment", "commenturl")
try:
- while 1:
- line = f.readline()
- if line == "": break
+ while (line := f.readline()) != "":
if not line.startswith(header):
continue
line = line[len(header):].strip()
filename)
try:
- while 1:
- line = f.readline()
+ while (line := f.readline()) != "":
rest = {}
- if line == "": break
-
# httponly is a cookie flag as defined in rfc6265
# when encoded in a netscape cookie file,
# the line is prepended with "#HttpOnly_"
def __iter__(self):
    """Iterate over lines.

    Yields each line from self.readline() until it returns a falsy
    value (empty string/bytes at EOF).
    """
    while line := self.readline():
        yield line
def tell(self):
the viewing command is stored with the key "view".
"""
caps = {}
- while 1:
- line = fp.readline()
- if not line: break
+ while line := fp.readline():
# Ignore comments and blank lines
if line[0] == '#' or line.strip() == '':
continue
list of standard types, else to the list of non-standard
types.
"""
- while 1:
- line = fp.readline()
- if not line:
- break
+ while line := fp.readline():
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
for word, tup in self.sort_arg_dict_default.items():
fragment = word
while fragment:
- if not fragment:
- break
if fragment in dict:
bad_list[fragment] = 0
break
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
- while True:
- match = pattern.search(text, here)
- if not match: break
+ while match := pattern.search(text, here):
start, end = match.span()
results.append(escape(text[here:start]))
output.write(s + lineEnd)
prevline = None
- while 1:
- line = input.readline()
- if not line:
- break
+ while line := input.readline():
outline = []
# Strip off any readline induced trailing newline
stripped = b''
return
new = b''
- while 1:
- line = input.readline()
- if not line: break
+ while line := input.readline():
i, n = 0, len(line)
if n > 0 and line[n-1:n] == b'\n':
partial = 0; n = n-1
def _print_tokens(lexer):
    """Print every token from *lexer* until get_token() returns a falsy value."""
    while tt := lexer.get_token():
        print("Token: " + repr(tt))
if __name__ == '__main__':
# Localize variable access to minimize overhead.
fsrc_read = fsrc.read
fdst_write = fdst.write
- while True:
- buf = fsrc_read(length)
- if not buf:
- break
+ while buf := fsrc_read(length):
fdst_write(buf)
def _samefile(src, dst):
toaddrs = prompt("To").split(',')
print("Enter message, end with ^D:")
msg = ''
- while 1:
- line = sys.stdin.readline()
- if not line:
- break
+ while line := sys.stdin.readline():
msg = msg + line
print("Message length is %d" % len(msg))
selector.register(self, selectors.EVENT_READ)
while True:
- ready = selector.select(timeout)
- if ready:
+ if selector.select(timeout):
return self._handle_request_noblock()
else:
if timeout is not None:
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
- while True:
- match = regex.match(buf, pos)
- if not match:
- break
-
+ while match := regex.match(buf, pos):
length, keyword = match.groups()
length = int(length)
if length == 0:
"""Read through the entire archive file and look for readable
members.
"""
- while True:
- tarinfo = self.next()
- if tarinfo is None:
- break
+ while self.next() is not None:
+ pass
self._loaded = True
def _check(self, mode=None):
def test_read_10(self):
with LZMAFile(BytesIO(COMPRESSED_XZ)) as f:
chunks = []
- while True:
- result = f.read(10)
- if not result:
- break
+ while result := f.read(10):
self.assertLessEqual(len(result), 10)
chunks.append(result)
self.assertEqual(b"".join(chunks), INPUT)
def test_read1(self):
with LZMAFile(BytesIO(COMPRESSED_XZ)) as f:
blocks = []
- while True:
- result = f.read1()
- if not result:
- break
+ while result := f.read1():
blocks.append(result)
self.assertEqual(b"".join(blocks), INPUT)
self.assertEqual(f.read1(), b"")
def test_read1_10(self):
with LZMAFile(BytesIO(COMPRESSED_XZ)) as f:
blocks = []
- while True:
- result = f.read1(10)
- if not result:
- break
+ while result := f.read1(10):
blocks.append(result)
self.assertEqual(b"".join(blocks), INPUT)
self.assertEqual(f.read1(), b"")
def test_read1_multistream(self):
with LZMAFile(BytesIO(COMPRESSED_XZ * 5)) as f:
blocks = []
- while True:
- result = f.read1()
- if not result:
- break
+ while result := f.read1():
blocks.append(result)
self.assertEqual(b"".join(blocks), INPUT * 5)
self.assertEqual(f.read1(), b"")
if reporthook:
reporthook(blocknum, bs, size)
- while True:
- block = fp.read(bs)
- if not block:
- break
+ while block := fp.read(bs):
read += len(block)
tfp.write(block)
blocknum += 1
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
- while 1:
- block = fp.read(bs)
- if not block:
- break
+ while block := fp.read(bs):
read += len(block)
tfp.write(block)
blocknum += 1
from warnings import warn
warn("SimpleHandler.stdout.write() should not do partial writes",
DeprecationWarning)
- while True:
- data = data[result:]
- if not data:
- break
+ while data := data[result:]:
result = self.stdout.write(data)
def _flush(self):
return lines
def __iter__(self):
    """Yield successive lines from readline() until it returns a falsy value."""
    while line := self.readline():
        yield line
def close(self):
def unpack_list(self, unpack_item):
list = []
- while 1:
- x = self.unpack_uint()
- if x == 0: break
+ while (x := self.unpack_uint()) != 0:
if x != 1:
raise ConversionError('0 or 1 expected, got %r' % (x,))
item = unpack_item()
parser = self.getParser()
first_buffer = True
try:
- while 1:
- buffer = file.read(16*1024)
- if not buffer:
- break
+ while buffer := file.read(16*1024):
parser.Parse(buffer, False)
if first_buffer and self.document.documentElement:
self._setup_subset(buffer)
# it with chunks.
self._root = parser._parse_whole(source)
return self._root
- while True:
- data = source.read(65536)
- if not data:
- break
+ while data := source.read(65536):
parser.feed(data)
self._root = parser.close()
return self._root
file = source.getCharacterStream()
if file is None:
file = source.getByteStream()
- buffer = file.read(self._bufsize)
- while buffer:
+ while buffer := file.read(self._bufsize):
self.feed(buffer)
- buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
p, u = self.getparser()
- while 1:
- data = stream.read(1024)
- if not data:
- break
+ while data := stream.read(1024):
if self.verbose:
print("body:", repr(data))
p.feed(data)
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
- while 1:
- match = pattern.search(text, here)
- if not match: break
+ while match := pattern.search(text, here):
start, end = match.span()
results.append(escape(text[here:start]))