Coverage for src / agent / providers / github / chat_client.py: 100%
16 statements
« prev ^ index » next coverage.py v7.13.0, created at 2025-12-11 14:30 +0000
1# Copyright 2025-2026 Microsoft Corporation
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
15"""
16GitHub Models chat client implementation using OpenAI-compatible API.
18This module provides GitHubChatClient that integrates GitHub Models
19with the Microsoft Agent Framework. GitHub Models uses an OpenAI-compatible
20API at https://models.github.ai.
21"""
23import logging
25from agent_framework.openai import OpenAIChatClient
27from .auth import get_github_token
29logger = logging.getLogger(__name__)
class GitHubChatClient(OpenAIChatClient):
    """Chat client for GitHub Models using the OpenAI-compatible API.

    GitHub Models exposes an OpenAI-compatible API at
    https://models.github.ai with the model name passed in the request
    body (not in a deployment path), so this client extends
    OpenAIChatClient rather than an Azure-style deployment client.

    Args:
        model_id: Model name (e.g., "gpt-5-nano", "gpt-4o-mini",
            "Mistral-small", "Meta-Llama-3.1-8B-Instruct")
        token: GitHub token (optional, will use get_github_token() if not provided)

    Example:
        >>> client = GitHubChatClient(model_id="gpt-5-nano")
        >>> # Token automatically fetched from GITHUB_TOKEN or gh CLI

    Available Models:
        - gpt-5-nano (default), gpt-4o-mini, gpt-4o (OpenAI models)
        - Mistral-small, Mistral-Nemo, Mistral-large-2407 (Mistral models)
        - Meta-Llama-3.1-8B-Instruct, Meta-Llama-3.1-70B-Instruct,
          Meta-Llama-3.1-405B-Instruct
        - AI21-Jamba-Instruct
    """

    # OpenTelemetry provider name for tracing
    OTEL_PROVIDER_NAME = "github"

    def __init__(
        self,
        model_id: str,
        token: str | None = None,
        endpoint: str = "https://models.github.ai",
        org: str | None = None,
    ):
        """Initialize GitHubChatClient with model and authentication.

        Args:
            model_id: GitHub model name
            token: GitHub token (optional, will use get_github_token() if not provided)
            endpoint: GitHub Models API endpoint (default: https://models.github.ai).
                A trailing slash, if present, is stripped before URL construction.
            org: Organization name for enterprise rate limits (optional)

        Raises:
            ValueError: If authentication fails

        Note:
            For enterprise users, providing org enables organization-scoped requests
            which offer 15,000 requests/hour instead of free tier limits.
            Example: org="microsoft" uses https://models.github.ai/orgs/microsoft
        """
        # Get token if not provided (falls back to GITHUB_TOKEN / gh CLI)
        if token is None:
            token = get_github_token()

        # Normalize the endpoint so "https://models.github.ai/" does not
        # yield a double slash in the constructed base URL.
        endpoint = endpoint.rstrip("/")

        # Construct base URL with organization scope if provided.
        # The OpenAI client will append /chat/completions to the base URL.
        if org:
            base_url = f"{endpoint}/orgs/{org}/inference"
            # Lazy %-style args: formatting is skipped if INFO is disabled.
            logger.info("Using organization-scoped endpoint: %s", base_url)
        else:
            base_url = f"{endpoint}/inference"
            logger.info("Using personal endpoint: %s", base_url)

        # Initialize OpenAI client with the GitHub Models endpoint.
        # GitHub Models uses an OpenAI-compatible API with model in request body.
        super().__init__(
            model_id=model_id,
            base_url=base_url,
            api_key=token,
        )

        logger.info("GitHubChatClient initialized with model: %s", model_id)