open_ai: Move from o1-preview to o1 for OpenAI Assistant provider (#23425)

- Closes: https://github.com/zed-industries/zed/issues/22521
- Follow-up to: https://github.com/zed-industries/zed/pull/22376
Author: Peter Tripp
Date: 2025-01-21 15:05:21 -05:00
Commit: 365398a7f1 (parent: 058bdae413)
3 changed files with 9 additions and 11 deletions

@@ -80,7 +80,7 @@ impl CloudModel {
                 | open_ai::Model::FourOmni
                 | open_ai::Model::FourOmniMini
                 | open_ai::Model::O1Mini
-                | open_ai::Model::O1Preview
+                | open_ai::Model::O1
                 | open_ai::Model::Custom { .. } => {
                     LanguageModelAvailability::RequiresPlan(Plan::ZedPro)
                 }

@@ -362,9 +362,7 @@ pub fn count_open_ai_tokens(
         .collect::<Vec<_>>();
     match model {
-        open_ai::Model::Custom { .. }
-        | open_ai::Model::O1Mini
-        | open_ai::Model::O1Preview => {
+        open_ai::Model::Custom { .. } | open_ai::Model::O1Mini | open_ai::Model::O1 => {
             tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
         }
         _ => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
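The `"gpt-4"` fallback stays in place here presumably because tiktoken_rs has no dedicated encoding entry for the o1-family ids, so their token counts are approximated with the GPT-4 (cl100k_base) tokenizer. A minimal standalone sketch of that approximation, assuming the `tiktoken_rs` and `anyhow` crates as dependencies; the prompt text is illustrative only:

```rust
// Sketch of the token-counting fallback: count tokens with the gpt-4
// tokenizer, since an o1-family id may not resolve to an encoding in
// tiktoken_rs.
use tiktoken_rs::get_bpe_from_model;

fn main() -> anyhow::Result<()> {
    let bpe = get_bpe_from_model("gpt-4")?; // resolves to the cl100k_base encoding
    let tokens = bpe.encode_with_special_tokens("Count my tokens, please.");
    println!("approximate token count: {}", tokens.len());
    Ok(())
}
```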

@@ -72,8 +72,8 @@ pub enum Model {
     FourOmni,
     #[serde(rename = "gpt-4o-mini", alias = "gpt-4o-mini")]
     FourOmniMini,
-    #[serde(rename = "o1-preview", alias = "o1-preview")]
-    O1Preview,
+    #[serde(rename = "o1", alias = "o1-preview")]
+    O1,
     #[serde(rename = "o1-mini", alias = "o1-mini")]
     O1Mini,
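
The `rename`/`alias` pair is what keeps existing user settings working: `"o1-preview"` still deserializes into the renamed variant, while serialization emits the new `"o1"` id. A self-contained sketch of that serde behavior (not the crate's actual code; `serde` with the derive feature and `serde_json` assumed as dependencies):

```rust
use serde::{Deserialize, Serialize};

// Illustrative stand-in for the real enum: only the renamed variant matters here.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
enum Model {
    #[serde(rename = "o1", alias = "o1-preview")]
    O1,
}

fn main() -> serde_json::Result<()> {
    // Both the new id and the legacy alias deserialize to the same variant...
    assert_eq!(serde_json::from_str::<Model>(r#""o1""#)?, Model::O1);
    assert_eq!(serde_json::from_str::<Model>(r#""o1-preview""#)?, Model::O1);
    // ...while serialization only ever emits the new id.
    assert_eq!(serde_json::to_string(&Model::O1)?, r#""o1""#);
    Ok(())
}
```
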
@@ -96,7 +96,7 @@ impl Model {
             "gpt-4-turbo-preview" => Ok(Self::FourTurbo),
             "gpt-4o" => Ok(Self::FourOmni),
             "gpt-4o-mini" => Ok(Self::FourOmniMini),
-            "o1-preview" => Ok(Self::O1Preview),
+            "o1" => Ok(Self::O1),
             "o1-mini" => Ok(Self::O1Mini),
             _ => Err(anyhow!("invalid model id")),
         }
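
Note that `from_id` does not get the same alias treatment: after this change only `"o1"` parses here, and the legacy string is rejected. A hypothetical, trimmed-down mirror of the diff's behavior (`anyhow` assumed as a dependency):

```rust
use anyhow::anyhow;

// Trimmed-down stand-in for the real enum and its `from_id` constructor.
#[derive(Debug, PartialEq)]
enum Model {
    O1,
    O1Mini,
}

impl Model {
    fn from_id(id: &str) -> anyhow::Result<Self> {
        match id {
            "o1" => Ok(Self::O1),
            "o1-mini" => Ok(Self::O1Mini),
            _ => Err(anyhow!("invalid model id")),
        }
    }
}

fn main() {
    assert_eq!(Model::from_id("o1").unwrap(), Model::O1);
    // Unlike serde deserialization, the legacy id is no longer accepted here.
    assert!(Model::from_id("o1-preview").is_err());
}
```
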
@@ -109,7 +109,7 @@ impl Model {
             Self::FourTurbo => "gpt-4-turbo",
             Self::FourOmni => "gpt-4o",
             Self::FourOmniMini => "gpt-4o-mini",
-            Self::O1Preview => "o1-preview",
+            Self::O1 => "o1",
             Self::O1Mini => "o1-mini",
             Self::Custom { name, .. } => name,
         }
@@ -122,7 +122,7 @@ impl Model {
             Self::FourTurbo => "gpt-4-turbo",
             Self::FourOmni => "gpt-4o",
             Self::FourOmniMini => "gpt-4o-mini",
-            Self::O1Preview => "o1-preview",
+            Self::O1 => "o1",
             Self::O1Mini => "o1-mini",
             Self::Custom {
                 name, display_name, ..
@@ -137,7 +137,7 @@ impl Model {
             Self::FourTurbo => 128000,
             Self::FourOmni => 128000,
             Self::FourOmniMini => 128000,
-            Self::O1Preview => 128000,
+            Self::O1 => 128000,
             Self::O1Mini => 128000,
             Self::Custom { max_tokens, .. } => *max_tokens,
         }
@@ -475,7 +475,7 @@ pub async fn stream_completion(
     api_key: &str,
     request: Request,
 ) -> Result<BoxStream<'static, Result<ResponseStreamEvent>>> {
-    if request.model == "o1-preview" || request.model == "o1-mini" {
+    if request.model.starts_with("o1") {
         let response = complete(client, api_url, api_key, request).await;
         let response_stream_event = response.map(adapt_response_to_stream);
         return Ok(stream::once(future::ready(response_stream_event)).boxed());
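
The guard also widens from an exact-match list to a prefix check, so `"o1"`, `"o1-mini"`, and even the legacy `"o1-preview"` all take the non-streaming path: the request goes through the blocking `complete` call and the response is adapted back into a single-event stream. A standalone sketch of the predicate; the helper name is hypothetical:

```rust
// Hypothetical helper mirroring the new guard in `stream_completion`:
// any model id starting with "o1" is routed around streaming.
fn takes_non_streaming_path(model_id: &str) -> bool {
    model_id.starts_with("o1")
}

fn main() {
    for id in ["o1", "o1-mini", "o1-preview", "gpt-4o"] {
        println!("{id}: non-streaming = {}", takes_non_streaming_path(id));
    }
}
```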