跳转到内容

pydantic_ai.result

StreamedRunResult dataclass

基类:Generic[AgentDepsT, OutputDataT]

通过工具调用返回结构化数据的流式运行结果。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
@dataclass(init=False)
class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
    """Result of a streamed run that returns structured data via a tool call."""

    # Full message history of the run; `_marked_completed` appends the final
    # model response here, so callers of `all_messages()` see it after the stream ends.
    _all_messages: list[_messages.ModelMessage]
    # Index into `_all_messages` where this run's new messages begin (see `new_messages`).
    _new_message_index: int

    # Exactly one of `_stream_response` / `_run_result` is expected to be set,
    # per the two `__init__` overloads below; every accessor branches on which one is present.
    _stream_response: AgentStream[AgentDepsT, OutputDataT] | None = None
    # Awaited once when the stream finishes (invoked from `_marked_completed`).
    _on_complete: Callable[[], Awaitable[None]] | None = None

    # Pre-computed result, used when the run already completed without live streaming.
    _run_result: AgentRunResult[OutputDataT] | None = None

    is_complete: bool = field(default=False, init=False)
    """Whether the stream has all been received.

    This is set to `True` when one of
    [`stream_output`][pydantic_ai.result.StreamedRunResult.stream_output],
    [`stream_text`][pydantic_ai.result.StreamedRunResult.stream_text],
    [`stream_responses`][pydantic_ai.result.StreamedRunResult.stream_responses] or
    [`get_output`][pydantic_ai.result.StreamedRunResult.get_output] completes.
    """

    @overload
    def __init__(
        self,
        all_messages: list[_messages.ModelMessage],
        new_message_index: int,
        stream_response: AgentStream[AgentDepsT, OutputDataT] | None,
        on_complete: Callable[[], Awaitable[None]] | None,
    ) -> None: ...

    @overload
    def __init__(
        self,
        all_messages: list[_messages.ModelMessage],
        new_message_index: int,
        *,
        run_result: AgentRunResult[OutputDataT],
    ) -> None: ...

    def __init__(
        self,
        all_messages: list[_messages.ModelMessage],
        new_message_index: int,
        stream_response: AgentStream[AgentDepsT, OutputDataT] | None = None,
        on_complete: Callable[[], Awaitable[None]] | None = None,
        run_result: AgentRunResult[OutputDataT] | None = None,
    ) -> None:
        self._all_messages = all_messages
        self._new_message_index = new_message_index

        self._stream_response = stream_response
        self._on_complete = on_complete
        self._run_result = run_result

    def all_messages(self, *, output_tool_return_content: str | None = None) -> list[_messages.ModelMessage]:
        """Return the history of _messages.

        Args:
            output_tool_return_content: The return content of the tool call to set in the last message.
                This provides a convenient way to modify the content of the output tool call if you want to continue
                the conversation and want to set the response to the output tool call. If `None`, the last message will
                not be modified.

        Returns:
            List of messages.
        """
        # this is a method to be consistent with the other methods
        if output_tool_return_content is not None:
            raise NotImplementedError('Setting output tool return content is not supported for this result type.')
        return self._all_messages

    def all_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes:  # pragma: no cover
        """Return all messages from [`all_messages`][pydantic_ai.result.StreamedRunResult.all_messages] as JSON bytes.

        Args:
            output_tool_return_content: The return content of the tool call to set in the last message.
                This provides a convenient way to modify the content of the output tool call if you want to continue
                the conversation and want to set the response to the output tool call. If `None`, the last message will
                not be modified.

        Returns:
            JSON bytes representing the messages.
        """
        return _messages.ModelMessagesTypeAdapter.dump_json(
            self.all_messages(output_tool_return_content=output_tool_return_content)
        )

    def new_messages(
        self, *, output_tool_return_content: str | None = None
    ) -> list[_messages.ModelMessage]:  # pragma: no cover
        """Return new messages associated with this run.

        Messages from older runs are excluded.

        Args:
            output_tool_return_content: The return content of the tool call to set in the last message.
                This provides a convenient way to modify the content of the output tool call if you want to continue
                the conversation and want to set the response to the output tool call. If `None`, the last message will
                not be modified.

        Returns:
            List of new messages.
        """
        return self.all_messages(output_tool_return_content=output_tool_return_content)[self._new_message_index :]

    def new_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes:  # pragma: no cover
        """Return new messages from [`new_messages`][pydantic_ai.result.StreamedRunResult.new_messages] as JSON bytes.

        Args:
            output_tool_return_content: The return content of the tool call to set in the last message.
                This provides a convenient way to modify the content of the output tool call if you want to continue
                the conversation and want to set the response to the output tool call. If `None`, the last message will
                not be modified.

        Returns:
            JSON bytes representing the new messages.
        """
        return _messages.ModelMessagesTypeAdapter.dump_json(
            self.new_messages(output_tool_return_content=output_tool_return_content)
        )

    @deprecated('`StreamedRunResult.stream` is deprecated, use `stream_output` instead.')
    async def stream(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
        """Deprecated alias for [`stream_output`][pydantic_ai.result.StreamedRunResult.stream_output]."""
        # Thin delegating wrapper kept for backwards compatibility.
        async for output in self.stream_output(debounce_by=debounce_by):
            yield output

    async def stream_output(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
        """Stream the output as an async iterable.

        The pydantic validator for structured data will be called in
        [partial mode](https://docs.pydantic.dev/dev/concepts/experimental/#partial-validation)
        on each iteration.

        Args:
            debounce_by: by how much (if at all) to debounce/group the output chunks by. `None` means no debouncing.
                Debouncing is particularly important for long structured outputs to reduce the overhead of
                performing validation as each token is received.

        Returns:
            An async iterable of the response data.
        """
        if self._run_result is not None:
            # Run already finished: yield the single final output, no streaming needed.
            yield self._run_result.output
            await self._marked_completed()
        elif self._stream_response is not None:
            async for output in self._stream_response.stream_output(debounce_by=debounce_by):
                yield output
            await self._marked_completed(self._stream_response.get())
        else:
            raise ValueError('No stream response or run result provided')  # pragma: no cover

    async def stream_text(self, *, delta: bool = False, debounce_by: float | None = 0.1) -> AsyncIterator[str]:
        """Stream the text result as an async iterable.

        !!! note
            Result validators will NOT be called on the text result if `delta=True`.

        Args:
            delta: if `True`, yield each chunk of text as it is received, if `False` (default), yield the full text
                up to the current point.
            debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
                Debouncing is particularly important for long structured responses to reduce the overhead of
                performing validation as each token is received.
        """
        if self._run_result is not None:  # pragma: no cover
            # We can't really get here, as `_run_result` is only set in `run_stream` when `CallToolsNode` produces `DeferredToolRequests` output
            # as a result of a tool function raising `CallDeferred` or `ApprovalRequired`.
            # That'll change if we ever support something like `raise EndRun(output: OutputT)` where `OutputT` could be `str`.
            if not isinstance(self._run_result.output, str):
                raise exceptions.UserError('stream_text() can only be used with text responses')
            yield self._run_result.output
            await self._marked_completed()
        elif self._stream_response is not None:
            async for text in self._stream_response.stream_text(delta=delta, debounce_by=debounce_by):
                yield text
            await self._marked_completed(self._stream_response.get())
        else:
            raise ValueError('No stream response or run result provided')  # pragma: no cover

    @deprecated('`StreamedRunResult.stream_structured` is deprecated, use `stream_responses` instead.')
    async def stream_structured(
        self, *, debounce_by: float | None = 0.1
    ) -> AsyncIterator[tuple[_messages.ModelResponse, bool]]:
        """Deprecated alias for [`stream_responses`][pydantic_ai.result.StreamedRunResult.stream_responses]."""
        # Thin delegating wrapper kept for backwards compatibility.
        async for msg, last in self.stream_responses(debounce_by=debounce_by):
            yield msg, last

    async def stream_responses(
        self, *, debounce_by: float | None = 0.1
    ) -> AsyncIterator[tuple[_messages.ModelResponse, bool]]:
        """Stream the response as an async iterable of Structured LLM Messages.

        Args:
            debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
                Debouncing is particularly important for long structured responses to reduce the overhead of
                performing validation as each token is received.

        Returns:
            An async iterable of the structured response message and whether that is the last message.
        """
        if self._run_result is not None:
            model_response = cast(_messages.ModelResponse, self.all_messages()[-1])
            yield model_response, True
            await self._marked_completed()
        elif self._stream_response is not None:
            # if the message currently has any parts with content, yield before streaming
            async for msg in self._stream_response.stream_responses(debounce_by=debounce_by):
                yield msg, False

            msg = self._stream_response.get()
            yield msg, True

            await self._marked_completed(msg)
        else:
            raise ValueError('No stream response or run result provided')  # pragma: no cover

    async def get_output(self) -> OutputDataT:
        """Stream the whole response, validate and return it."""
        if self._run_result is not None:
            output = self._run_result.output
            await self._marked_completed()
            return output
        elif self._stream_response is not None:
            output = await self._stream_response.get_output()
            await self._marked_completed(self._stream_response.get())
            return output
        else:
            raise ValueError('No stream response or run result provided')  # pragma: no cover

    def usage(self) -> RunUsage:
        """Return the usage of the whole run.

        !!! note
            This won't return the full usage until the stream is finished.
        """
        if self._run_result is not None:
            return self._run_result.usage()
        elif self._stream_response is not None:
            return self._stream_response.usage()
        else:
            raise ValueError('No stream response or run result provided')  # pragma: no cover

    def timestamp(self) -> datetime:
        """Get the timestamp of the response."""
        if self._run_result is not None:
            return self._run_result.timestamp()
        elif self._stream_response is not None:
            return self._stream_response.timestamp()
        else:
            raise ValueError('No stream response or run result provided')  # pragma: no cover

    @deprecated('`validate_structured_output` is deprecated, use `validate_response_output` instead.')
    async def validate_structured_output(
        self, message: _messages.ModelResponse, *, allow_partial: bool = False
    ) -> OutputDataT:
        """Deprecated alias for [`validate_response_output`][pydantic_ai.result.StreamedRunResult.validate_response_output]."""
        return await self.validate_response_output(message, allow_partial=allow_partial)

    async def validate_response_output(
        self, message: _messages.ModelResponse, *, allow_partial: bool = False
    ) -> OutputDataT:
        """Validate a structured result message."""
        if self._run_result is not None:
            return self._run_result.output
        elif self._stream_response is not None:
            return await self._stream_response.validate_response_output(message, allow_partial=allow_partial)
        else:
            raise ValueError('No stream response or run result provided')  # pragma: no cover

    async def _marked_completed(self, message: _messages.ModelResponse | None = None) -> None:
        """Mark the stream as finished: set `is_complete`, record the final response, run the completion hook."""
        self.is_complete = True
        if message is not None:
            # Append the final model response so it shows up in `all_messages()`.
            self._all_messages.append(message)
        if self._on_complete is not None:
            await self._on_complete()

is_complete class-attribute instance-attribute

is_complete: bool = field(default=False, init=False)

流是否已全部接收完毕。

当 stream_output、stream_text、stream_responses 或 get_output 中的任何一个完成时,该值将被设置为 True。

all_messages

all_messages(
    *, output_tool_return_content: str | None = None
) -> list[ModelMessage]

返回 _messages 的历史记录。

参数

名称 类型 描述 默认值
output_tool_return_content str | None

在最后一条消息中设置的工具调用的返回内容。如果您想继续对话并设置对输出工具调用的响应,这提供了一种修改输出工具调用内容的便捷方式。如果为 None,则最后一条消息将不会被修改。

None

返回

类型 描述
list[ModelMessage]

消息列表。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
def all_messages(self, *, output_tool_return_content: str | None = None) -> list[_messages.ModelMessage]:
    """Return the history of _messages.

    Args:
        output_tool_return_content: The return content of the tool call to set in the last message.
            This provides a convenient way to modify the content of the output tool call if you want to continue
            the conversation and want to set the response to the output tool call. If `None`, the last message will
            not be modified.

    Returns:
        List of messages.
    """
    # this is a method to be consistent with the other methods
    if output_tool_return_content is not None:
        raise NotImplementedError('Setting output tool return content is not supported for this result type.')
    return self._all_messages

all_messages_json

all_messages_json(
    *, output_tool_return_content: str | None = None
) -> bytes

将 all_messages 中的所有消息作为 JSON 字节返回。

参数

名称 类型 描述 默认值
output_tool_return_content str | None

在最后一条消息中设置的工具调用的返回内容。如果您想继续对话并设置对输出工具调用的响应,这提供了一种修改输出工具调用内容的便捷方式。如果为 None,则最后一条消息将不会被修改。

None

返回

类型 描述
bytes

表示消息的 JSON 字节。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
def all_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes:  # pragma: no cover
    """Return all messages from [`all_messages`][pydantic_ai.result.StreamedRunResult.all_messages] as JSON bytes.

    Args:
        output_tool_return_content: The return content of the tool call to set in the last message.
            This provides a convenient way to modify the content of the output tool call if you want to continue
            the conversation and want to set the response to the output tool call. If `None`, the last message will
            not be modified.

    Returns:
        JSON bytes representing the messages.
    """
    return _messages.ModelMessagesTypeAdapter.dump_json(
        self.all_messages(output_tool_return_content=output_tool_return_content)
    )

new_messages

new_messages(
    *, output_tool_return_content: str | None = None
) -> list[ModelMessage]

返回与此运行相关的新消息。

旧运行中的消息将被排除。

参数

名称 类型 描述 默认值
output_tool_return_content str | None

在最后一条消息中设置的工具调用的返回内容。如果您想继续对话并设置对输出工具调用的响应,这提供了一种修改输出工具调用内容的便捷方式。如果为 None,则最后一条消息将不会被修改。

None

返回

类型 描述
list[ModelMessage]

新消息列表。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
def new_messages(
    self, *, output_tool_return_content: str | None = None
) -> list[_messages.ModelMessage]:  # pragma: no cover
    """Return new messages associated with this run.

    Messages from older runs are excluded.

    Args:
        output_tool_return_content: The return content of the tool call to set in the last message.
            This provides a convenient way to modify the content of the output tool call if you want to continue
            the conversation and want to set the response to the output tool call. If `None`, the last message will
            not be modified.

    Returns:
        List of new messages.
    """
    return self.all_messages(output_tool_return_content=output_tool_return_content)[self._new_message_index :]

new_messages_json

new_messages_json(
    *, output_tool_return_content: str | None = None
) -> bytes

将 new_messages 中的新消息作为 JSON 字节返回。

参数

名称 类型 描述 默认值
output_tool_return_content str | None

在最后一条消息中设置的工具调用的返回内容。如果您想继续对话并设置对输出工具调用的响应,这提供了一种修改输出工具调用内容的便捷方式。如果为 None,则最后一条消息将不会被修改。

None

返回

类型 描述
bytes

表示新消息的 JSON 字节。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
def new_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes:  # pragma: no cover
    """Return new messages from [`new_messages`][pydantic_ai.result.StreamedRunResult.new_messages] as JSON bytes.

    Args:
        output_tool_return_content: The return content of the tool call to set in the last message.
            This provides a convenient way to modify the content of the output tool call if you want to continue
            the conversation and want to set the response to the output tool call. If `None`, the last message will
            not be modified.

    Returns:
        JSON bytes representing the new messages.
    """
    return _messages.ModelMessagesTypeAdapter.dump_json(
        self.new_messages(output_tool_return_content=output_tool_return_content)
    )

stream async deprecated

stream(
    *, debounce_by: float | None = 0.1
) -> AsyncIterator[OutputDataT]
已弃用

StreamedRunResult.stream 已弃用,请改用 stream_output

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
359
360
361
362
@deprecated('`StreamedRunResult.stream` is deprecated, use `stream_output` instead.')
async def stream(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
    async for output in self.stream_output(debounce_by=debounce_by):
        yield output

stream_output async

stream_output(
    *, debounce_by: float | None = 0.1
) -> AsyncIterator[OutputDataT]

将输出作为异步可迭代对象进行流式传输。

用于结构化数据的 pydantic 验证器将在每次迭代中以部分模式被调用。

参数

名称 类型 描述 默认值
debounce_by float | None

对输出块进行去抖动/分组的程度(如果有的话)。None 表示不去抖动。对于长的结构化输出,去抖动尤其重要,可以减少在接收每个令牌时执行验证的开销。

0.1

返回

类型 描述
AsyncIterator[OutputDataT]

响应数据的异步可迭代对象。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
async def stream_output(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
    """Stream the output as an async iterable.

    The pydantic validator for structured data will be called in
    [partial mode](https://docs.pydantic.dev/dev/concepts/experimental/#partial-validation)
    on each iteration.

    Args:
        debounce_by: by how much (if at all) to debounce/group the output chunks by. `None` means no debouncing.
            Debouncing is particularly important for long structured outputs to reduce the overhead of
            performing validation as each token is received.

    Returns:
        An async iterable of the response data.
    """
    if self._run_result is not None:
        # Run already finished: yield the single final output, no streaming needed.
        yield self._run_result.output
        await self._marked_completed()
    elif self._stream_response is not None:
        async for output in self._stream_response.stream_output(debounce_by=debounce_by):
            yield output
        await self._marked_completed(self._stream_response.get())
    else:
        raise ValueError('No stream response or run result provided')  # pragma: no cover

stream_text async

stream_text(
    *, delta: bool = False, debounce_by: float | None = 0.1
) -> AsyncIterator[str]

将文本结果作为异步可迭代对象进行流式传输。

注意

如果 delta=True,结果验证器将不会对文本结果进行调用。

参数

名称 类型 描述 默认值
delta bool

如果为 True,则在接收到每个文本块时生成它;如果为 False(默认),则生成截至当前点的完整文本。

False
debounce_by float | None

对响应块进行去抖动/分组的程度(如果有的话)。None 表示不去抖动。对于长的结构化响应,去抖动尤其重要,可以减少在接收每个令牌时执行验证的开销。

0.1
源代码位于 pydantic_ai_slim/pydantic_ai/result.py
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
async def stream_text(self, *, delta: bool = False, debounce_by: float | None = 0.1) -> AsyncIterator[str]:
    """Stream the text result as an async iterable.

    !!! note
        Result validators will NOT be called on the text result if `delta=True`.

    Args:
        delta: if `True`, yield each chunk of text as it is received, if `False` (default), yield the full text
            up to the current point.
        debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
            Debouncing is particularly important for long structured responses to reduce the overhead of
            performing validation as each token is received.
    """
    if self._run_result is not None:  # pragma: no cover
        # We can't really get here, as `_run_result` is only set in `run_stream` when `CallToolsNode` produces `DeferredToolRequests` output
        # as a result of a tool function raising `CallDeferred` or `ApprovalRequired`.
        # That'll change if we ever support something like `raise EndRun(output: OutputT)` where `OutputT` could be `str`.
        if not isinstance(self._run_result.output, str):
            raise exceptions.UserError('stream_text() can only be used with text responses')
        yield self._run_result.output
        await self._marked_completed()
    elif self._stream_response is not None:
        async for text in self._stream_response.stream_text(delta=delta, debounce_by=debounce_by):
            yield text
        await self._marked_completed(self._stream_response.get())
    else:
        raise ValueError('No stream response or run result provided')  # pragma: no cover

stream_structured async deprecated

stream_structured(
    *, debounce_by: float | None = 0.1
) -> AsyncIterator[tuple[ModelResponse, bool]]
已弃用

StreamedRunResult.stream_structured 已弃用,请改用 stream_responses

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
417
418
419
420
421
422
@deprecated('`StreamedRunResult.stream_structured` is deprecated, use `stream_responses` instead.')
async def stream_structured(
    self, *, debounce_by: float | None = 0.1
) -> AsyncIterator[tuple[_messages.ModelResponse, bool]]:
    async for msg, last in self.stream_responses(debounce_by=debounce_by):
        yield msg, last

stream_responses async

stream_responses(
    *, debounce_by: float | None = 0.1
) -> AsyncIterator[tuple[ModelResponse, bool]]

将响应作为结构化 LLM 消息的异步可迭代对象进行流式传输。

参数

名称 类型 描述 默认值
debounce_by float | None

对响应块进行去抖动/分组的程度(如果有的话)。None 表示不去抖动。对于长的结构化响应,去抖动尤其重要,可以减少在接收每个令牌时执行验证的开销。

0.1

返回

类型 描述
AsyncIterator[tuple[ModelResponse, bool]]

结构化响应消息以及这是否是最后一条消息的异步可迭代对象。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
async def stream_responses(
    self, *, debounce_by: float | None = 0.1
) -> AsyncIterator[tuple[_messages.ModelResponse, bool]]:
    """Stream the response as an async iterable of Structured LLM Messages.

    Args:
        debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
            Debouncing is particularly important for long structured responses to reduce the overhead of
            performing validation as each token is received.

    Returns:
        An async iterable of the structured response message and whether that is the last message.
    """
    if self._run_result is not None:
        model_response = cast(_messages.ModelResponse, self.all_messages()[-1])
        yield model_response, True
        await self._marked_completed()
    elif self._stream_response is not None:
        # if the message currently has any parts with content, yield before streaming
        async for msg in self._stream_response.stream_responses(debounce_by=debounce_by):
            yield msg, False

        msg = self._stream_response.get()
        yield msg, True

        await self._marked_completed(msg)
    else:
        raise ValueError('No stream response or run result provided')  # pragma: no cover

get_output async

get_output() -> OutputDataT

流式传输整个响应,对其进行验证并返回。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
453
454
455
456
457
458
459
460
461
462
463
464
async def get_output(self) -> OutputDataT:
    """Stream the whole response, validate and return it."""
    if self._run_result is not None:
        output = self._run_result.output
        await self._marked_completed()
        return output
    elif self._stream_response is not None:
        output = await self._stream_response.get_output()
        await self._marked_completed(self._stream_response.get())
        return output
    else:
        raise ValueError('No stream response or run result provided')  # pragma: no cover

usage

usage() -> RunUsage

返回整个运行的使用情况。

注意

在流结束前,此方法不会返回完整的使用情况。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
466
467
468
469
470
471
472
473
474
475
476
477
def usage(self) -> RunUsage:
    """Return the usage of the whole run.

    !!! note
        This won't return the full usage until the stream is finished.
    """
    if self._run_result is not None:
        return self._run_result.usage()
    elif self._stream_response is not None:
        return self._stream_response.usage()
    else:
        raise ValueError('No stream response or run result provided')  # pragma: no cover

timestamp

timestamp() -> datetime

获取响应的时间戳。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
479
480
481
482
483
484
485
486
def timestamp(self) -> datetime:
    """Get the timestamp of the response."""
    if self._run_result is not None:
        return self._run_result.timestamp()
    elif self._stream_response is not None:
        return self._stream_response.timestamp()
    else:
        raise ValueError('No stream response or run result provided')  # pragma: no cover

validate_structured_output async deprecated

validate_structured_output(
    message: ModelResponse, *, allow_partial: bool = False
) -> OutputDataT
已弃用

validate_structured_output 已弃用,请改用 validate_response_output

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
488
489
490
491
492
@deprecated('`validate_structured_output` is deprecated, use `validate_response_output` instead.')
async def validate_structured_output(
    self, message: _messages.ModelResponse, *, allow_partial: bool = False
) -> OutputDataT:
    return await self.validate_response_output(message, allow_partial=allow_partial)

validate_response_output async

validate_response_output(
    message: ModelResponse, *, allow_partial: bool = False
) -> OutputDataT

验证结构化结果消息。

源代码位于 pydantic_ai_slim/pydantic_ai/result.py
494
495
496
497
498
499
500
501
502
503
async def validate_response_output(
    self, message: _messages.ModelResponse, *, allow_partial: bool = False
) -> OutputDataT:
    """Validate a structured result message."""
    if self._run_result is not None:
        return self._run_result.output
    elif self._stream_response is not None:
        return await self._stream_response.validate_response_output(message, allow_partial=allow_partial)
    else:
        raise ValueError('No stream response or run result provided')  # pragma: no cover