def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]:
    """Returns an asynchronous iterator that yields chunks of size n."""
    return AsyncStreamIterator(lambda: self.read(n))  # type: ignore[attr-defined]
def iter_any(self) -> AsyncStreamIterator[bytes]:
    """Yield all available data as soon as it is received."""
    return AsyncStreamIterator(self.readany)  # type: ignore[attr-defined]
def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:
    """Yield chunks of data as they are received by the server.

    The yielded objects are tuples
    of (bytes, bool) as returned by the StreamReader.readchunk method.
    """
    return ChunkTupleAsyncStreamIterator(self)  # type: ignore[arg-type]
class StreamReader(AsyncStreamReaderMixin): """An enhancement of asyncio.StreamReader.
Supports asynchronous iteration by line, chunk or as available::
async for line in reader:
...
async for chunk in reader.iter_chunked(1024):
...
async for slice in reader.iter_any():
...
def unread_data(self, data: bytes) -> None:
    """rollback reading some data from stream, inserting it to buffer head."""
    warnings.warn(
        "unread_data() is deprecated "
        "and will be removed in future releases (#3260)",
        DeprecationWarning,
        stacklevel=2,
    )
    # Nothing to roll back for empty input.
    if not data:
        return
    # NOTE(review): the logic that actually re-inserts `data` at the buffer
    # head (buffer/offset/cursor bookkeeping) appears to be missing or
    # truncated in this copy -- confirm against upstream before relying on it.
# TODO: size is ignored, remove the param later
def feed_data(self, data: bytes, size: int = 0) -> None:
    """Feed ``data`` into the stream; ``size`` is accepted but ignored."""
    assert not self._eof, "feed_data after feed_eof"
    # NOTE(review): the buffer-append / waiter-wakeup logic appears to be
    # truncated in this copy -- confirm against upstream.
    # Apply flow control: pause the transport once the buffered size
    # exceeds the high-water mark, unless reading is already paused.
    if self._size > self._high_water and not self._protocol._reading_paused:
        self._protocol.pause_reading()
def begin_http_chunk_receiving(self) -> None:
    """Start tracking HTTP chunk boundaries (idempotent once started)."""
    if self._http_chunk_splits is None:
        if self.total_bytes:
            # Chunk tracking must start before any body bytes arrive,
            # otherwise the recorded offsets would be meaningless.
            # (Bug fix: the two adjacent string literals previously
            # concatenated to "whensome" -- space restored.)
            raise RuntimeError(
                "Called begin_http_chunk_receiving when "
                "some data was already fed"
            )
        self._http_chunk_splits = []
def end_http_chunk_receiving(self) -> None:
    """Record the end of an HTTP chunk and wake a pending readchunk()."""
    if self._http_chunk_splits is None:
        raise RuntimeError(
            "Called end_chunk_receiving without calling "
            "begin_chunk_receiving first"
        )

    # self._http_chunk_splits contains logical byte offsets from start of
    # the body transfer. Each offset is the offset of the end of a chunk.
    # "Logical" means bytes, accessible for a user.
    # If no chunks containing logical data were received, current position
    # is definitely zero.
    pos = self._http_chunk_splits[-1] if self._http_chunk_splits else 0

    if self.total_bytes == pos:
        # We should not add empty chunks here. So we check for that.
        # Note, when chunked + gzip is used, we can receive a chunk
        # of compressed data, but that data may not be enough for gzip FSM
        # to yield any uncompressed data. That's why current position may
        # not change after receiving a chunk.
        return

    self._http_chunk_splits.append(self.total_bytes)

    # wake up readchunk when end of http chunk received
    waiter = self._waiter
    if waiter is not None:
        self._waiter = None
        set_result(waiter, None)
# StreamReader uses a future to link the protocol feed_data() method # to a read coroutine. Running two read coroutines at the same time # would have an unexpected behaviour. It would not possible to know # which coroutine would get the next data. if self._waiter isnotNone: raise RuntimeError( "%s() called while another coroutine is " "already waiting for incoming data" % func_name
)
async def readuntil(self, separator: bytes = b"\n") -> bytes:
    """Read from the stream until ``separator`` is found.

    Returns the accumulated data including the separator (or everything
    read so far if EOF is hit first).  Raises ValueError for an empty
    separator or when the accumulated data exceeds the high-water mark;
    re-raises any exception stored on the stream.
    """
    seplen = len(separator)
    if seplen == 0:
        raise ValueError("Separator should be at least one-byte string")

    if self._exception is not None:
        raise self._exception

    chunk = b""
    chunk_size = 0
    not_enough = True

    while not_enough:
        while self._buffer and not_enough:
            offset = self._buffer_offset
            ichar = self._buffer[0].find(separator, offset) + 1
            # Read from current offset to found separator or to the end.
            data = self._read_nowait_chunk(
                ichar - offset + seplen - 1 if ichar else -1
            )
            chunk += data
            chunk_size += len(data)
            if ichar:
                not_enough = False

            if chunk_size > self._high_water:
                raise ValueError("Chunk too big")

        if self._eof:
            break

        if not_enough:
            await self._wait("readuntil")

    return chunk
async def read(self, n: int = -1) -> bytes:
    """Read up to ``n`` bytes.

    ``n == 0`` returns ``b""`` immediately; ``n < 0`` reads until EOF
    via readany(); otherwise waits for data and reads at most ``n`` bytes.
    """
    if self._exception is not None:
        raise self._exception

    # migration problem; with DataQueue you have to catch
    # EofStream exception, so common way is to run payload.read() inside
    # infinite loop. what can cause real infinite loop with StreamReader
    # lets keep this code one major release.
    if __debug__:
        if self._eof and not self._buffer:
            self._eof_counter = getattr(self, "_eof_counter", 0) + 1
            if self._eof_counter > 5:
                internal_logger.warning(
                    "Multiple access to StreamReader in eof state, "
                    "might be infinite loop.",
                    stack_info=True,
                )

    if not n:
        return b""

    if n < 0:
        # This used to just loop creating a new waiter hoping to
        # collect everything in self._buffer, but that would
        # deadlock if the subprocess sends more than self.limit
        # bytes.  So just call self.readany() until EOF.
        blocks = []
        while True:
            block = await self.readany()
            if not block:
                break
            blocks.append(block)
        return b"".join(blocks)

    # TODO: should be `if` instead of `while`
    # because waiter maybe triggered on chunk end,
    # without feeding any data
    while not self._buffer and not self._eof:
        await self._wait("read")

    return self._read_nowait(n)
async def readany(self) -> bytes:
    """Wait for data (unless at EOF) and return whatever is buffered."""
    if self._exception is not None:
        raise self._exception

    # TODO: should be `if` instead of `while`
    # because waiter maybe triggered on chunk end,
    # without feeding any data
    while not self._buffer and not self._eof:
        await self._wait("readany")

    return self._read_nowait(-1)
async def readchunk(self) -> Tuple[bytes, bool]:
    """Returns a tuple of (data, end_of_http_chunk).

    When chunked transfer
    encoding is used, end_of_http_chunk is a boolean indicating if the end
    of the data corresponds to the end of a HTTP chunk, otherwise it is
    always False.
    """
    while True:
        if self._exception is not None:
            raise self._exception

        while self._http_chunk_splits:
            pos = self._http_chunk_splits.pop(0)
            if pos == self._cursor:
                return (b"", True)
            if pos > self._cursor:
                return (self._read_nowait(pos - self._cursor), True)
            internal_logger.warning(
                "Skipping HTTP chunk end due to data "
                "consumption beyond chunk boundary"
            )

        if self._buffer:
            return (self._read_nowait_chunk(-1), False)
            # return (self._read_nowait(-1), False)

        if self._eof:
            # Special case for signifying EOF.
            # (b'', True) is not a final return value actually.
            return (b"", False)

        # NOTE(review): the wait-for-data step at the bottom of this loop
        # appears truncated in this copy (without it the loop would spin);
        # reconstructed -- confirm against upstream.
        await self._wait("readchunk")
async def readexactly(self, n: int) -> bytes:
    """Read exactly ``n`` bytes, raising IncompleteReadError on early EOF.

    NOTE(review): the ``async def`` header was lost in this copy and has
    been reconstructed from the body's use of ``n`` and ``self.read`` --
    confirm against upstream.
    """
    blocks: List[bytes] = []
    while n > 0:
        block = await self.read(n)
        if not block:
            partial = b"".join(blocks)
            raise asyncio.IncompleteReadError(partial, len(partial) + n)
        blocks.append(block)
        n -= len(block)

    return b"".join(blocks)
def read_nowait(self, n: int = -1) -> bytes:
    """Synchronously read up to ``n`` buffered bytes (all if n == -1)."""
    # default was changed to be consistent with .read(-1)
    #
    # I believe the most users don't know about the method and
    # they are not affected.
    if self._exception is not None:
        raise self._exception

    # Mixing nowait reads with a pending read coroutine would make data
    # delivery order unpredictable, so refuse.
    if self._waiter and not self._waiter.done():
        raise RuntimeError(
            "Called while some coroutine is waiting for incoming data."
        )

    return self._read_nowait(n)
def _read_nowait_chunk(self, n: int) -> bytes:
first_buffer = self._buffer[0]
offset = self._buffer_offset if n != -1 and len(first_buffer) - offset > n:
data = first_buffer[offset : offset + n]
self._buffer_offset += n
elif offset:
self._buffer.popleft()
data = first_buffer[offset:]
self._buffer_offset = 0
else:
data = self._buffer.popleft()
self._size -= len(data)
self._cursor += len(data)
chunk_splits = self._http_chunk_splits # Prevent memory leak: drop useless chunk splits while chunk_splits and chunk_splits[0] < self._cursor:
chunk_splits.pop(0)
if self._size < self._low_water and self._protocol._reading_paused:
self._protocol.resume_reading() return data
def _read_nowait(self, n: int) -> bytes: """Read not more than n bytes, or whole buffer if n == -1"""
self._timer.assert_timeout()
chunks = [] while self._buffer:
chunk = self._read_nowait_chunk(n)
chunks.append(chunk) if n != -1:
n -= len(chunk) if n == 0: break
# NOTE(review): the following German disclaimer text appears to be
# accidentally pasted website boilerplate and is not part of this module.
# Preserved here as a comment (English translation): "The information on
# this website has been carefully compiled to the best of our knowledge.
# However, neither completeness, nor correctness, nor quality of the
# provided information is guaranteed. Remark: the colored syntax
# highlighting and the measurement are still experimental."