# /opt/hc_python/lib/python3.12/site-packages/sentry_sdk/integrations/openai.py

from functools import wraps

import sentry_sdk
from sentry_sdk import consts
from sentry_sdk.ai.monitoring import record_token_usage
from sentry_sdk.ai.utils import set_data_normalized
from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.utils import (
    capture_internal_exceptions,
    event_from_exception,
)

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Any, Iterable, List, Optional, Callable, AsyncIterator, Iterator
    from sentry_sdk.tracing import Span

try:
    from openai.resources.chat.completions import Completions, AsyncCompletions
    from openai.resources import Embeddings, AsyncEmbeddings

    if TYPE_CHECKING:
        from openai.types.chat import ChatCompletionMessageParam, ChatCompletionChunk
except ImportError:
    raise DidNotEnable("OpenAI not installed")
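
# A hedged usage sketch (not part of this module): the integration is enabled
# by passing it to sentry_sdk.init(). The DSN below is a placeholder.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.openai import OpenAIIntegration
#
#     sentry_sdk.init(
#         dsn="https://<key>@<host>/<project>",  # placeholder DSN
#         send_default_pii=True,  # prompts/responses are only sent with PII on
#         integrations=[OpenAIIntegration(include_prompts=True)],
#     )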


class OpenAIIntegration(Integration):
    identifier = "openai"
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True, tiktoken_encoding_name=None):
        # type: (OpenAIIntegration, bool, Optional[str]) -> None
        self.include_prompts = include_prompts

        self.tiktoken_encoding = None
        if tiktoken_encoding_name is not None:
            import tiktoken  # type: ignore

            self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)

    @staticmethod
    def setup_once():
        # type: () -> None
        # Instrumentation is installed by monkeypatching the OpenAI client's
        # sync and async create() methods in place.
        Completions.create = _wrap_chat_completion_create(Completions.create)
        Embeddings.create = _wrap_embeddings_create(Embeddings.create)

        AsyncCompletions.create = _wrap_async_chat_completion_create(
            AsyncCompletions.create
        )
        AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)

    def count_tokens(self, s):
        # type: (OpenAIIntegration, str) -> int
        if self.tiktoken_encoding is not None:
            return len(self.tiktoken_encoding.encode_ordinary(s))
        return 0
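
# A hedged note on token counting: count_tokens() above returns 0 unless the
# user opts into local counting via tiktoken, e.g. (assuming tiktoken is
# installed; "cl100k_base" is one of its real encoding names):
#
#     OpenAIIntegration(tiktoken_encoding_name="cl100k_base")
#
# Without this, streamed completions (whose chunks typically carry no usage
# object) record no token counts.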


def _capture_exception(exc):
    # type: (Any) -> None
    event, hint = event_from_exception(
        exc,
        client_options=sentry_sdk.get_client().options,
        mechanism={"type": "openai", "handled": False},
    )
    sentry_sdk.capture_event(event, hint=hint)


def _calculate_chat_completion_usage(
    messages, response, span, streaming_message_responses, count_tokens
):
    # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
    completion_tokens = 0  # type: Optional[int]
    prompt_tokens = 0  # type: Optional[int]
    total_tokens = 0  # type: Optional[int]
    if hasattr(response, "usage"):
        if hasattr(response.usage, "completion_tokens") and isinstance(
            response.usage.completion_tokens, int
        ):
            completion_tokens = response.usage.completion_tokens
        if hasattr(response.usage, "prompt_tokens") and isinstance(
            response.usage.prompt_tokens, int
        ):
            prompt_tokens = response.usage.prompt_tokens
        if hasattr(response.usage, "total_tokens") and isinstance(
            response.usage.total_tokens, int
        ):
            total_tokens = response.usage.total_tokens

    if prompt_tokens == 0:
        for message in messages:
            if "content" in message:
                prompt_tokens += count_tokens(message["content"])

    if completion_tokens == 0:
        if streaming_message_responses is not None:
            for message in streaming_message_responses:
                completion_tokens += count_tokens(message)
        elif hasattr(response, "choices"):
            for choice in response.choices:
                if hasattr(choice, "message"):
                    # count the message's text content, not the message object
                    completion_tokens += count_tokens(choice.message.content or "")

    if prompt_tokens == 0:
        prompt_tokens = None
    if completion_tokens == 0:
        completion_tokens = None
    if total_tokens == 0:
        total_tokens = None
    record_token_usage(span, prompt_tokens, completion_tokens, total_tokens)
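
# Exact counts from response.usage are preferred above; count_tokens() is only
# a fallback for whatever the API did not report, and remaining zeros are
# converted to None so record_token_usage() can tell "unknown" from an actual
# zero.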


def _new_chat_completion_common(f, *args, **kwargs):
    # type: (Any, *Any, **Any) -> Any
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    if "messages" not in kwargs:
        # invalid call (in all versions of openai); let it fail with OpenAI's own error
        return f(*args, **kwargs)

    try:
        iter(kwargs["messages"])
    except TypeError:
        # invalid call (in all versions); "messages" must be iterable
        return f(*args, **kwargs)

    kwargs["messages"] = list(kwargs["messages"])
    messages = kwargs["messages"]
    model = kwargs.get("model")
    streaming = kwargs.get("stream")

    span = sentry_sdk.start_span(
        op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE,
        name="Chat Completion",
        origin=OpenAIIntegration.origin,
    )
    span.__enter__()
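    # The span is entered manually rather than via "with": for streaming
    # responses it must stay open until the returned iterator is exhausted,
    # so __exit__ is called from each terminal branch below.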

    res = yield f, args, kwargs

    with capture_internal_exceptions():
        if should_send_default_pii() and integration.include_prompts:
            set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, messages)

        set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
        set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)

        if hasattr(res, "choices"):
            if should_send_default_pii() and integration.include_prompts:
                set_data_normalized(
                    span,
                    SPANDATA.AI_RESPONSES,
                    list(map(lambda x: x.message, res.choices)),
                )
            _calculate_chat_completion_usage(
                messages, res, span, None, integration.count_tokens
            )
            span.__exit__(None, None, None)
        elif hasattr(res, "_iterator"):
            data_buf: list[list[str]] = []  # one for each choice

            old_iterator = res._iterator

            def new_iterator():
                # type: () -> Iterator[ChatCompletionChunk]
                with capture_internal_exceptions():
                    for x in old_iterator:
                        if hasattr(x, "choices"):
                            choice_index = 0
                            for choice in x.choices:
                                if hasattr(choice, "delta") and hasattr(
                                    choice.delta, "content"
                                ):
                                    content = choice.delta.content
                                    if len(data_buf) <= choice_index:
                                        data_buf.append([])
                                    data_buf[choice_index].append(content or "")
                                choice_index += 1
                        yield x
                    if len(data_buf) > 0:
                        all_responses = list(
                            map(lambda chunk: "".join(chunk), data_buf)
                        )
                        if should_send_default_pii() and integration.include_prompts:
                            set_data_normalized(
                                span, SPANDATA.AI_RESPONSES, all_responses
                            )
                        _calculate_chat_completion_usage(
                            messages,
                            res,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )
                span.__exit__(None, None, None)

            async def new_iterator_async():
                # type: () -> AsyncIterator[ChatCompletionChunk]
                with capture_internal_exceptions():
                    async for x in old_iterator:
                        if hasattr(x, "choices"):
                            choice_index = 0
                            for choice in x.choices:
                                if hasattr(choice, "delta") and hasattr(
                                    choice.delta, "content"
                                ):
                                    content = choice.delta.content
                                    if len(data_buf) <= choice_index:
                                        data_buf.append([])
                                    data_buf[choice_index].append(content or "")
                                choice_index += 1
                        yield x
                    if len(data_buf) > 0:
                        all_responses = list(
                            map(lambda chunk: "".join(chunk), data_buf)
                        )
                        if should_send_default_pii() and integration.include_prompts:
                            set_data_normalized(
                                span, SPANDATA.AI_RESPONSES, all_responses
                            )
                        _calculate_chat_completion_usage(
                            messages,
                            res,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )
                span.__exit__(None, None, None)

            # Blunt but dependency-free async detection: async streams expose
            # an async generator as their private _iterator.
            if str(type(res._iterator)) == "<class 'async_generator'>":
                res._iterator = new_iterator_async()
            else:
                res._iterator = new_iterator()

        else:
            set_data_normalized(span, "unknown_response", True)
            span.__exit__(None, None, None)
    return res
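
# The wrappers below drive _new_chat_completion_common as a generator so one
# instrumentation body serves both sync and async call sites. A minimal sketch
# of the protocol (simplified; error handling omitted):
#
#     gen = _new_chat_completion_common(f, *args, **kwargs)
#     f, args, kwargs = next(gen)   # generator yields the call to perform
#     result = f(*args, **kwargs)   # caller performs it (plain or awaited)
#     gen.send(result)              # generator resumes, records span data, and
#                                   # returns the result via StopIteration.value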


def _wrap_chat_completion_create(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    def _execute_sync(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_chat_completion_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return e.value

        try:
            try:
                result = f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None or "messages" not in kwargs:
            # no "messages" means invalid call (in all versions of openai), let it return error
            return f(*args, **kwargs)

        return _execute_sync(f, *args, **kwargs)

    return _sentry_patched_create_sync


def _wrap_async_chat_completion_create(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    async def _execute_async(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_chat_completion_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return await e.value

        try:
            try:
                result = await f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    async def _sentry_patched_create_async(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None or "messages" not in kwargs:
            # no "messages" means invalid call (in all versions of openai), let it return error
            return await f(*args, **kwargs)

        return await _execute_async(f, *args, **kwargs)

    return _sentry_patched_create_async


def _new_embeddings_create_common(f, *args, **kwargs):
    # type: (Any, *Any, **Any) -> Any
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    with sentry_sdk.start_span(
        op=consts.OP.OPENAI_EMBEDDINGS_CREATE,
        description="OpenAI Embedding Creation",
        origin=OpenAIIntegration.origin,
    ) as span:
        if "input" in kwargs and (
            should_send_default_pii() and integration.include_prompts
        ):
            if isinstance(kwargs["input"], str):
                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
            elif (
                isinstance(kwargs["input"], list)
                and len(kwargs["input"]) > 0
                and isinstance(kwargs["input"][0], str)
            ):
                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
        if "model" in kwargs:
            set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])

        response = yield f, args, kwargs

        prompt_tokens = 0
        total_tokens = 0
        if hasattr(response, "usage"):
            if hasattr(response.usage, "prompt_tokens") and isinstance(
                response.usage.prompt_tokens, int
            ):
                prompt_tokens = response.usage.prompt_tokens
            if hasattr(response.usage, "total_tokens") and isinstance(
                response.usage.total_tokens, int
            ):
                total_tokens = response.usage.total_tokens

        if prompt_tokens == 0 and isinstance(kwargs.get("input"), str):
            # only string input can be counted locally; list inputs are skipped
            prompt_tokens = integration.count_tokens(kwargs["input"])

        record_token_usage(span, prompt_tokens, None, total_tokens or prompt_tokens)

        return response
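
# Embeddings report no completion tokens, so record_token_usage() above passes
# None for them; when the API omits usage entirely, the locally counted
# prompt_tokens doubles as the total.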


def _wrap_embeddings_create(f):
    # type: (Any) -> Any
    def _execute_sync(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_embeddings_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return e.value

        try:
            try:
                result = f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return f(*args, **kwargs)

        return _execute_sync(f, *args, **kwargs)

    return _sentry_patched_create_sync


def _wrap_async_embeddings_create(f):
    # type: (Any) -> Any
    async def _execute_async(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_embeddings_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return await e.value

        try:
            try:
                result = await f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    async def _sentry_patched_create_async(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return await f(*args, **kwargs)

        return await _execute_async(f, *args, **kwargs)

    return _sentry_patched_create_async
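
# An end-to-end sketch (hedged; assumes an openai>=1.0-style client, and the
# model name is only an example). With the integration set up, calls are
# instrumented transparently:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     resp = client.chat.completions.create(
#         model="gpt-4o-mini",  # example model name
#         messages=[{"role": "user", "content": "Hello"}],
#     )
#     # emits a span (op consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE) carrying the
#     # model id, streaming flag, token usage, and, with PII enabled, the
#     # messages and responses.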
