+ * When {@code true}, sessions in a GitHub repository working directory are
+ * accessible from GitHub web and mobile.
+ *
+ * @return {@code true} if remote sessions are enabled
+ */
+ public boolean isRemote() {
+ return remote;
+ }
+
+ /**
+ * Enables remote session support (Mission Control integration).
+ *
+ * When {@code true}, sessions in a GitHub repository working directory are
+ * accessible from GitHub web and mobile.
+ *
+ * This option is only used when the SDK spawns the CLI process; it is ignored
+ * when connecting to an external server via {@link #setCliUrl(String)}.
+ *
+ * @param remote
+ * {@code true} to enable remote sessions
+ * @return this options instance for method chaining
+ */
+ public CopilotClientOptions setRemote(boolean remote) {
+ this.remote = remote;
+ return this;
+ }
+
/**
* Gets the OpenTelemetry configuration for the CLI server.
*
@@ -599,6 +631,7 @@ public CopilotClientOptions clone() {
copy.logLevel = this.logLevel;
copy.onListModels = this.onListModels;
copy.port = this.port;
+ copy.remote = this.remote;
copy.sessionIdleTimeoutSeconds = this.sessionIdleTimeoutSeconds;
copy.tcpConnectionToken = this.tcpConnectionToken;
copy.telemetry = this.telemetry;
diff --git a/src/main/java/com/github/copilot/sdk/json/ProviderConfig.java b/src/main/java/com/github/copilot/sdk/json/ProviderConfig.java
index 3b2995681..8947696c9 100644
--- a/src/main/java/com/github/copilot/sdk/json/ProviderConfig.java
+++ b/src/main/java/com/github/copilot/sdk/json/ProviderConfig.java
@@ -57,6 +57,18 @@ public class ProviderConfig {
@JsonProperty("headers")
private Map<String, String> headers;
+ * Used to look up agent configuration (tools, prompts, reasoning behavior) and
+ * default token limits. Also used as the wire model when
+ * {@link #getWireModel()} is not set.
+ *
+ * @return the model ID, or {@code null} if not set
+ */
+ public String getModelId() {
+ return modelId;
+ }
+
+ /**
+ * Sets the well-known model name used by the runtime.
+ *
+ * Used to look up agent configuration (tools, prompts, reasoning behavior) and
+ * default token limits. Also used as the wire model when
+ * {@link #getWireModel()} is not set. Falls back to
+ * {@link SessionConfig#getModel()}.
+ *
+ * @param modelId
+ * the model ID
+ * @return this config for method chaining
+ */
+ public ProviderConfig setModelId(String modelId) {
+ this.modelId = modelId;
+ return this;
+ }
+
+ /**
+ * Gets the model name sent to the provider API for inference.
+ *
+ * @return the wire model name, or {@code null} if not set
+ */
+ public String getWireModel() {
+ return wireModel;
+ }
+
+ /**
+ * Sets the model name sent to the provider API for inference.
+ *
+ * Use this when the provider's model name (e.g. an Azure deployment name or a
+ * custom fine-tune name) differs from {@link #getModelId()}. Falls back to
+ * {@link #getModelId()}, then {@link SessionConfig#getModel()}.
+ *
+ * @param wireModel
+ * the wire model name
+ * @return this config for method chaining
+ */
+ public ProviderConfig setWireModel(String wireModel) {
+ this.wireModel = wireModel;
+ return this;
+ }
+
+ /**
+ * Gets the maximum prompt tokens override.
+ *
+ * @return the max prompt tokens, or {@code null} if not set
+ */
+ public Integer getMaxPromptTokens() {
+ return maxPromptTokens;
+ }
+
+ /**
+ * Sets the maximum prompt tokens override.
+ *
+ * Overrides the resolved model's default max prompt tokens. The runtime
+ * triggers conversation compaction before sending a request when the prompt
+ * (system message, history, tool definitions, user message) would exceed this
+ * limit.
+ *
+ * @param maxPromptTokens
+ * the max prompt tokens
+ * @return this config for method chaining
+ */
+ public ProviderConfig setMaxPromptTokens(Integer maxPromptTokens) {
+ this.maxPromptTokens = maxPromptTokens;
+ return this;
+ }
+
+ /**
+ * Gets the maximum output tokens override.
+ *
+ * @return the max output tokens, or {@code null} if not set
+ */
+ public Integer getMaxOutputTokens() {
+ return maxOutputTokens;
+ }
+
+ /**
+ * Sets the maximum output tokens override.
+ *
+ * Overrides the resolved model's default max output tokens. When hit, the model
+ * stops generating and returns a truncated response.
+ *
+ * @param maxOutputTokens
+ * the max output tokens
+ * @return this config for method chaining
+ */
+ public ProviderConfig setMaxOutputTokens(Integer maxOutputTokens) {
+ this.maxOutputTokens = maxOutputTokens;
+ return this;
+ }
}
diff --git a/src/site/markdown/advanced.md b/src/site/markdown/advanced.md
index e1e08a275..ccf386640 100644
--- a/src/site/markdown/advanced.md
+++ b/src/site/markdown/advanced.md
@@ -383,6 +383,29 @@ var session = client.createSession(
> **Note:** The `bearerToken` option accepts a **static token string** only. The SDK does not refresh this token automatically. If your token expires, requests will fail and you'll need to create a new session with a fresh token.
+### Model Overrides
+
+Use `modelId` and `wireModel` to control model resolution and the model name on the wire:
+
+```java
+var session = client.createSession(
+ new SessionConfig().setOnPermissionRequest(PermissionHandler.APPROVE_ALL)
+ .setProvider(new ProviderConfig()
+ .setType("openai")
+ .setBaseUrl("https://api.openai.com/v1")
+ .setApiKey("sk-...")
+ .setModelId("gpt-4o") // Runtime config lookup
+ .setWireModel("my-finetune-v3") // Sent to the provider API
+ .setMaxPromptTokens(100_000) // Override max prompt tokens
+ .setMaxOutputTokens(4096)) // Override max output tokens
+).get();
+```
+
+- **`modelId`** — Well-known model name used by the runtime to look up agent configuration (tools, prompts, reasoning behavior) and default token limits. Also used as the wire model when `wireModel` is not set.
+- **`wireModel`** — Model name sent to the provider API for inference. Use when the provider's model name (e.g., an Azure deployment name or a custom fine-tune name) differs from `modelId`.
+- **`maxPromptTokens`** — Overrides the resolved model's default max prompt tokens. The runtime triggers conversation compaction when the prompt would exceed this limit.
+- **`maxOutputTokens`** — Overrides the resolved model's default max output tokens.
+
### Microsoft Foundry Local
[Microsoft Foundry Local](https://foundrylocal.ai) lets you run AI models locally on your own device with an OpenAI-compatible API. Install it via the Foundry Local CLI, then point the SDK at your local endpoint:
@@ -1262,6 +1285,39 @@ This is more efficient than `listSessions()` when you already know the session I
---
+## Remote Sessions
+
+Remote sessions enable Mission Control integration, making sessions accessible from GitHub web and mobile. When enabled, sessions in a GitHub repository working directory receive a remote URL.
+
+### Enabling Remote Sessions
+
+Set `setRemote(true)` on the client options to enable remote session support for all sessions:
+
+```java
+var options = new CopilotClientOptions()
+ .setRemote(true)
+ .setCwd("/path/to/github-repo");
+
+try (var client = new CopilotClient(options)) {
+ var session = client.createSession(
+ new SessionConfig().setOnPermissionRequest(PermissionHandler.APPROVE_ALL)
+ ).get();
+
+ // Listen for the remote URL info event
+ session.on(SessionInfoEvent.class, event -> {
+ System.out.println("Remote URL: " + event.getData());
+ });
+}
+```
+
+### Prerequisites
+
+- The user must be authenticated (GitHub token or logged-in user)
+- The session's working directory must be a GitHub repository
+- This option is only used when the SDK spawns the CLI process; it is ignored when connecting to an external server via `setCliUrl()`
+
+---
+
## Next Steps
- 📖 **[Documentation](documentation.html)** - Core concepts, events, streaming, models, tool filtering, reasoning effort
diff --git a/src/test/java/com/github/copilot/sdk/CapiProxy.java b/src/test/java/com/github/copilot/sdk/CapiProxy.java
index 30f843436..d91762d51 100644
--- a/src/test/java/com/github/copilot/sdk/CapiProxy.java
+++ b/src/test/java/com/github/copilot/sdk/CapiProxy.java
@@ -56,10 +56,12 @@
public class CapiProxy implements AutoCloseable {
private static final ObjectMapper MAPPER = new ObjectMapper();
- private static final Pattern LISTENING_PATTERN = Pattern.compile("Listening: (http://[^\\s]+)");
+ private static final Pattern LISTENING_PATTERN = Pattern.compile("Listening: (http://[^\\s]+)(?:\\s+(\\{.*\\}))?$");
private Process process;
private String proxyUrl;
+ private String connectProxyUrl;
+ private String caFilePath;
private final HttpClient httpClient;
private BufferedReader stdoutReader;
@@ -137,7 +139,24 @@ public String start() throws IOException, InterruptedException {
throw new IOException("Unexpected proxy output: " + line);
}
- proxyUrl = matcher.group(1);
+ String url = matcher.group(1);
+
+ // Parse optional metadata (CONNECT proxy details)
+ String metadata = matcher.group(2);
+ if (metadata != null && !metadata.isEmpty()) {
+ try {
+ Map