Add language_models crate to house language model providers (#20945)

This PR adds a new `language_models` crate to house the various language
model providers.

By extracting the provider definitions out of `language_model`, we're
able to remove `language_model`'s dependency on `editor`, which improves
incremental compilation when changing `editor`.
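
Concretely, `language_model::init` no longer takes the user store, client, or filesystem; provider registration moves to `language_models::init`. A minimal sketch of the new two-step wiring, mirroring the `crates/zed/src/main.rs` hunk below (`app_state` as it appears there):

```rust
// Set up the (initially empty) LanguageModelRegistry global.
language_model::init(cx);

// Register the concrete providers (Anthropic, OpenAI, Ollama, Google AI,
// Copilot Chat, and the feature-flag-gated Zed cloud provider).
language_models::init(
    app_state.user_store.clone(),
    app_state.client.clone(),
    app_state.fs.clone(),
    cx,
);
```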

Release Notes:

- N/A
Marshall Bowers 2024-11-20 18:49:34 -05:00 committed by GitHub
parent 335b112abd
commit cbba44900d
27 changed files with 265 additions and 199 deletions

Cargo.lock

@@ -402,6 +402,7 @@ dependencies = [
"indoc",
"language",
"language_model",
"language_models",
"languages",
"log",
"lsp",
@@ -6520,27 +6521,48 @@ dependencies = [
"anthropic",
"anyhow",
"base64 0.22.1",
"client",
"collections",
"copilot",
"ctor",
"editor",
"env_logger 0.11.5",
"feature_flags",
"futures 0.3.31",
"google_ai",
"gpui",
"http_client",
"image",
"language",
"log",
"menu",
"ollama",
"open_ai",
"parking_lot",
"proto",
"schemars",
"serde",
"serde_json",
"smol",
"strum 0.25.0",
"ui",
"util",
]
[[package]]
name = "language_models"
version = "0.1.0"
dependencies = [
"anthropic",
"anyhow",
"client",
"collections",
"copilot",
"editor",
"feature_flags",
"fs",
"futures 0.3.31",
"google_ai",
"gpui",
"http_client",
"language_model",
"menu",
"ollama",
"open_ai",
"project",
"proto",
"rand 0.8.5",
"schemars",
"serde",
"serde_json",
@@ -6548,12 +6570,10 @@ dependencies = [
"smol",
"strum 0.25.0",
"telemetry_events",
"text",
"theme",
"thiserror 1.0.69",
"tiktoken-rs",
"ui",
"unindent",
"util",
]
@@ -15481,6 +15501,7 @@ dependencies = [
"journal",
"language",
"language_model",
"language_models",
"language_selector",
"language_tools",
"languages",

Cargo.toml

@@ -55,6 +55,7 @@ members = [
"crates/journal",
"crates/language",
"crates/language_model",
"crates/language_models",
"crates/language_selector",
"crates/language_tools",
"crates/languages",
@@ -228,6 +229,7 @@ install_cli = { path = "crates/install_cli" }
journal = { path = "crates/journal" }
language = { path = "crates/language" }
language_model = { path = "crates/language_model" }
language_models = { path = "crates/language_models" }
language_selector = { path = "crates/language_selector" }
language_tools = { path = "crates/language_tools" }
languages = { path = "crates/languages" }

crates/assistant/Cargo.toml

@@ -50,6 +50,7 @@ indexed_docs.workspace = true
indoc.workspace = true
language.workspace = true
language_model.workspace = true
language_models.workspace = true
log.workspace = true
lsp.workspace = true
markdown.workspace = true

crates/assistant/src/assistant_panel.rs

@@ -50,11 +50,11 @@ use indexed_docs::IndexedDocsStore;
use language::{
language_settings::SoftWrap, BufferSnapshot, LanguageRegistry, LspAdapterDelegate, ToOffset,
};
use language_model::{
provider::cloud::PROVIDER_ID, LanguageModelProvider, LanguageModelProviderId,
LanguageModelRegistry, Role,
};
use language_model::{LanguageModelImage, LanguageModelToolUse};
use language_model::{
LanguageModelProvider, LanguageModelProviderId, LanguageModelRegistry, Role,
ZED_CLOUD_PROVIDER_ID,
};
use multi_buffer::MultiBufferRow;
use picker::{Picker, PickerDelegate};
use project::lsp_store::LocalLspAdapterDelegate;
@@ -664,7 +664,7 @@ impl AssistantPanel {
// If we're signed out and don't have a provider configured, or we're signed-out AND Zed.dev is
// the provider, we want to show a nudge to sign in.
let show_zed_ai_notice = client_status.is_signed_out()
&& active_provider.map_or(true, |provider| provider.id().0 == PROVIDER_ID);
&& active_provider.map_or(true, |provider| provider.id().0 == ZED_CLOUD_PROVIDER_ID);
self.show_zed_ai_notice = show_zed_ai_notice;
cx.notify();

crates/assistant/src/assistant_settings.rs

@@ -5,13 +5,12 @@ use anthropic::Model as AnthropicModel;
use feature_flags::FeatureFlagAppExt;
use fs::Fs;
use gpui::{AppContext, Pixels};
use language_model::provider::open_ai;
use language_model::settings::{
AnthropicSettingsContent, AnthropicSettingsContentV1, OllamaSettingsContent,
OpenAiSettingsContent, OpenAiSettingsContentV1, VersionedAnthropicSettingsContent,
VersionedOpenAiSettingsContent,
use language_model::{CloudModel, LanguageModel};
use language_models::{
provider::open_ai, AllLanguageModelSettings, AnthropicSettingsContent,
AnthropicSettingsContentV1, OllamaSettingsContent, OpenAiSettingsContent,
OpenAiSettingsContentV1, VersionedAnthropicSettingsContent, VersionedOpenAiSettingsContent,
};
use language_model::{settings::AllLanguageModelSettings, CloudModel, LanguageModel};
use ollama::Model as OllamaModel;
use schemars::{schema::Schema, JsonSchema};
use serde::{Deserialize, Serialize};

crates/assistant/src/context.rs

@@ -25,13 +25,15 @@ use gpui::{
use language::{AnchorRangeExt, Bias, Buffer, LanguageRegistry, OffsetRangeExt, Point, ToOffset};
use language_model::{
logging::report_assistant_event,
provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError},
LanguageModel, LanguageModelCacheConfiguration, LanguageModelCompletionEvent,
LanguageModelImage, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
LanguageModelRequestTool, LanguageModelToolResult, LanguageModelToolUse, MessageContent, Role,
StopReason,
};
use language_models::{
provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError},
report_assistant_event,
};
use open_ai::Model as OpenAiModel;
use paths::contexts_dir;
use project::Project;

crates/assistant/src/inline_assistant.rs

@@ -30,9 +30,10 @@ use gpui::{
};
use language::{Buffer, IndentKind, Point, Selection, TransactionId};
use language_model::{
logging::report_assistant_event, LanguageModel, LanguageModelRegistry, LanguageModelRequest,
LanguageModelRequestMessage, LanguageModelTextStream, Role,
LanguageModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
LanguageModelTextStream, Role,
};
use language_models::report_assistant_event;
use multi_buffer::MultiBufferRow;
use parking_lot::Mutex;
use project::{CodeAction, ProjectTransaction};

crates/assistant/src/terminal_inline_assistant.rs

@@ -17,9 +17,9 @@ use gpui::{
};
use language::Buffer;
use language_model::{
logging::report_assistant_event, LanguageModelRegistry, LanguageModelRequest,
LanguageModelRequestMessage, Role,
LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage, Role,
};
use language_models::report_assistant_event;
use settings::Settings;
use std::{
cmp,

crates/language_model/Cargo.toml

@@ -13,56 +13,30 @@ path = "src/language_model.rs"
doctest = false
[features]
test-support = [
"editor/test-support",
"language/test-support",
"project/test-support",
"text/test-support",
]
test-support = []
[dependencies]
anthropic = { workspace = true, features = ["schemars"] }
anyhow.workspace = true
client.workspace = true
base64.workspace = true
collections.workspace = true
copilot = { workspace = true, features = ["schemars"] }
editor.workspace = true
feature_flags.workspace = true
futures.workspace = true
google_ai = { workspace = true, features = ["schemars"] }
gpui.workspace = true
http_client.workspace = true
image.workspace = true
log.workspace = true
menu.workspace = true
ollama = { workspace = true, features = ["schemars"] }
open_ai = { workspace = true, features = ["schemars"] }
parking_lot.workspace = true
proto.workspace = true
project.workspace = true
schemars.workspace = true
serde.workspace = true
serde_json.workspace = true
settings.workspace = true
smol.workspace = true
strum.workspace = true
telemetry_events.workspace = true
theme.workspace = true
thiserror.workspace = true
tiktoken-rs.workspace = true
ui.workspace = true
util.workspace = true
base64.workspace = true
image.workspace = true
[dev-dependencies]
ctor.workspace = true
editor = { workspace = true, features = ["test-support"] }
env_logger.workspace = true
language = { workspace = true, features = ["test-support"] }
log.workspace = true
project = { workspace = true, features = ["test-support"] }
proto = { workspace = true, features = ["test-support"] }
rand.workspace = true
text = { workspace = true, features = ["test-support"] }
unindent.workspace = true
gpui = { workspace = true, features = ["test-support"] }

crates/language_model/src/language_model.rs

@@ -1,23 +1,19 @@
pub mod logging;
mod model;
pub mod provider;
mod rate_limiter;
mod registry;
mod request;
mod role;
pub mod settings;
#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;
use anyhow::Result;
use client::{Client, UserStore};
use futures::FutureExt;
use futures::{future::BoxFuture, stream::BoxStream, StreamExt, TryStreamExt as _};
use gpui::{
AnyElement, AnyView, AppContext, AsyncAppContext, Model, SharedString, Task, WindowContext,
};
use gpui::{AnyElement, AnyView, AppContext, AsyncAppContext, SharedString, Task, WindowContext};
pub use model::*;
use project::Fs;
use proto::Plan;
pub(crate) use rate_limiter::*;
pub use rate_limiter::*;
pub use registry::*;
pub use request::*;
pub use role::*;
@@ -27,14 +23,10 @@ use std::fmt;
use std::{future::Future, sync::Arc};
use ui::IconName;
pub fn init(
user_store: Model<UserStore>,
client: Arc<Client>,
fs: Arc<dyn Fs>,
cx: &mut AppContext,
) {
settings::init(fs, cx);
registry::init(user_store, client, cx);
pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";
pub fn init(cx: &mut AppContext) {
registry::init(cx);
}
/// The availability of a [`LanguageModel`].
@@ -184,7 +176,7 @@ pub trait LanguageModel: Send + Sync {
}
#[cfg(any(test, feature = "test-support"))]
fn as_fake(&self) -> &provider::fake::FakeLanguageModel {
fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
unimplemented!()
}
}
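
With `provider::cloud` moving out of this crate, the `zed.dev` provider id is re-exported here as `ZED_CLOUD_PROVIDER_ID`, so call sites compare against the constant rather than importing `provider::cloud::PROVIDER_ID`. A minimal sketch of the new call-site shape, mirroring the assistant panel hunk above (`active_provider` is assumed to be an optional `LanguageModelProvider` handle):

```rust
use language_model::ZED_CLOUD_PROVIDER_ID;

// True when no provider is configured, or the configured provider is Zed cloud.
let uses_zed_cloud = active_provider
    .map_or(true, |provider| provider.id().0 == ZED_CLOUD_PROVIDER_ID);
```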

crates/language_model/src/registry.rs

@@ -1,76 +1,17 @@
use crate::provider::cloud::RefreshLlmTokenListener;
use crate::{
provider::{
anthropic::AnthropicLanguageModelProvider, cloud::CloudLanguageModelProvider,
copilot_chat::CopilotChatLanguageModelProvider, google::GoogleLanguageModelProvider,
ollama::OllamaLanguageModelProvider, open_ai::OpenAiLanguageModelProvider,
},
LanguageModel, LanguageModelId, LanguageModelProvider, LanguageModelProviderId,
LanguageModelProviderState,
};
use client::{Client, UserStore};
use collections::BTreeMap;
use gpui::{AppContext, EventEmitter, Global, Model, ModelContext};
use std::sync::Arc;
use ui::Context;
pub fn init(user_store: Model<UserStore>, client: Arc<Client>, cx: &mut AppContext) {
let registry = cx.new_model(|cx| {
let mut registry = LanguageModelRegistry::default();
register_language_model_providers(&mut registry, user_store, client, cx);
registry
});
pub fn init(cx: &mut AppContext) {
let registry = cx.new_model(|_cx| LanguageModelRegistry::default());
cx.set_global(GlobalLanguageModelRegistry(registry));
}
fn register_language_model_providers(
registry: &mut LanguageModelRegistry,
user_store: Model<UserStore>,
client: Arc<Client>,
cx: &mut ModelContext<LanguageModelRegistry>,
) {
use feature_flags::FeatureFlagAppExt;
RefreshLlmTokenListener::register(client.clone(), cx);
registry.register_provider(
AnthropicLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(
OpenAiLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(
OllamaLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(
GoogleLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(CopilotChatLanguageModelProvider::new(cx), cx);
cx.observe_flag::<feature_flags::LanguageModels, _>(move |enabled, cx| {
let user_store = user_store.clone();
let client = client.clone();
LanguageModelRegistry::global(cx).update(cx, move |registry, cx| {
if enabled {
registry.register_provider(
CloudLanguageModelProvider::new(user_store.clone(), client.clone(), cx),
cx,
);
} else {
registry.unregister_provider(
LanguageModelProviderId::from(crate::provider::cloud::PROVIDER_ID.to_string()),
cx,
);
}
});
})
.detach();
}
struct GlobalLanguageModelRegistry(Model<LanguageModelRegistry>);
impl Global for GlobalLanguageModelRegistry {}
@@ -106,8 +47,8 @@ impl LanguageModelRegistry {
}
#[cfg(any(test, feature = "test-support"))]
pub fn test(cx: &mut AppContext) -> crate::provider::fake::FakeLanguageModelProvider {
let fake_provider = crate::provider::fake::FakeLanguageModelProvider;
pub fn test(cx: &mut AppContext) -> crate::fake_provider::FakeLanguageModelProvider {
let fake_provider = crate::fake_provider::FakeLanguageModelProvider;
let registry = cx.new_model(|cx| {
let mut registry = Self::default();
registry.register_provider(fake_provider.clone(), cx);
@@ -148,7 +89,7 @@ impl LanguageModelRegistry {
}
pub fn providers(&self) -> Vec<Arc<dyn LanguageModelProvider>> {
let zed_provider_id = LanguageModelProviderId(crate::provider::cloud::PROVIDER_ID.into());
let zed_provider_id = LanguageModelProviderId("zed.dev".into());
let mut providers = Vec::with_capacity(self.providers.len());
if let Some(provider) = self.providers.get(&zed_provider_id) {
providers.push(provider.clone());
@@ -269,7 +210,7 @@ impl LanguageModelRegistry {
#[cfg(test)]
mod tests {
use super::*;
use crate::provider::fake::FakeLanguageModelProvider;
use crate::fake_provider::FakeLanguageModelProvider;
#[gpui::test]
fn test_register_providers(cx: &mut AppContext) {
@@ -281,10 +222,10 @@ mod tests {
let providers = registry.read(cx).providers();
assert_eq!(providers.len(), 1);
assert_eq!(providers[0].id(), crate::provider::fake::provider_id());
assert_eq!(providers[0].id(), crate::fake_provider::provider_id());
registry.update(cx, |registry, cx| {
registry.unregister_provider(crate::provider::fake::provider_id(), cx);
registry.unregister_provider(crate::fake_provider::provider_id(), cx);
});
let providers = registry.read(cx).providers();
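
And since the fake provider now lives in a top-level `fake_provider` module rather than `provider::fake`, tests reach it through the new path. A sketch under the `test-support` feature (hypothetical test name; the body is illustrative only):

```rust
use language_model::{fake_provider::FakeLanguageModelProvider, LanguageModelRegistry};

#[gpui::test]
fn test_with_fake_provider(cx: &mut gpui::AppContext) {
    // `LanguageModelRegistry::test` builds a registry with a
    // FakeLanguageModelProvider registered and returns the provider.
    let fake_provider: FakeLanguageModelProvider = LanguageModelRegistry::test(cx);
    let _ = fake_provider;
}
```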

crates/language_models/Cargo.toml

@@ -0,0 +1,49 @@
[package]
name = "language_models"
version = "0.1.0"
edition = "2021"
publish = false
license = "GPL-3.0-or-later"
[lints]
workspace = true
[lib]
path = "src/language_models.rs"
[dependencies]
anthropic = { workspace = true, features = ["schemars"] }
anyhow.workspace = true
client.workspace = true
collections.workspace = true
copilot = { workspace = true, features = ["schemars"] }
editor.workspace = true
feature_flags.workspace = true
fs.workspace = true
futures.workspace = true
google_ai = { workspace = true, features = ["schemars"] }
gpui.workspace = true
http_client.workspace = true
language_model.workspace = true
menu.workspace = true
ollama = { workspace = true, features = ["schemars"] }
open_ai = { workspace = true, features = ["schemars"] }
project.workspace = true
proto.workspace = true
schemars.workspace = true
serde.workspace = true
serde_json.workspace = true
settings.workspace = true
smol.workspace = true
strum.workspace = true
telemetry_events.workspace = true
theme.workspace = true
thiserror.workspace = true
tiktoken-rs.workspace = true
ui.workspace = true
util.workspace = true
[dev-dependencies]
editor = { workspace = true, features = ["test-support"] }
language_model = { workspace = true, features = ["test-support"] }
project = { workspace = true, features = ["test-support"] }

crates/language_models/LICENSE-GPL

@@ -0,0 +1 @@
../../LICENSE-GPL

crates/language_models/src/language_models.rs

@@ -0,0 +1,80 @@
use std::sync::Arc;
use client::{Client, UserStore};
use fs::Fs;
use gpui::{AppContext, Model, ModelContext};
use language_model::{LanguageModelProviderId, LanguageModelRegistry, ZED_CLOUD_PROVIDER_ID};
mod logging;
pub mod provider;
mod settings;
use crate::provider::anthropic::AnthropicLanguageModelProvider;
use crate::provider::cloud::{CloudLanguageModelProvider, RefreshLlmTokenListener};
use crate::provider::copilot_chat::CopilotChatLanguageModelProvider;
use crate::provider::google::GoogleLanguageModelProvider;
use crate::provider::ollama::OllamaLanguageModelProvider;
use crate::provider::open_ai::OpenAiLanguageModelProvider;
pub use crate::settings::*;
pub use logging::report_assistant_event;
pub fn init(
user_store: Model<UserStore>,
client: Arc<Client>,
fs: Arc<dyn Fs>,
cx: &mut AppContext,
) {
crate::settings::init(fs, cx);
let registry = LanguageModelRegistry::global(cx);
registry.update(cx, |registry, cx| {
register_language_model_providers(registry, user_store, client, cx);
});
}
fn register_language_model_providers(
registry: &mut LanguageModelRegistry,
user_store: Model<UserStore>,
client: Arc<Client>,
cx: &mut ModelContext<LanguageModelRegistry>,
) {
use feature_flags::FeatureFlagAppExt;
RefreshLlmTokenListener::register(client.clone(), cx);
registry.register_provider(
AnthropicLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(
OpenAiLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(
OllamaLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(
GoogleLanguageModelProvider::new(client.http_client(), cx),
cx,
);
registry.register_provider(CopilotChatLanguageModelProvider::new(cx), cx);
cx.observe_flag::<feature_flags::LanguageModels, _>(move |enabled, cx| {
let user_store = user_store.clone();
let client = client.clone();
LanguageModelRegistry::global(cx).update(cx, move |registry, cx| {
if enabled {
registry.register_provider(
CloudLanguageModelProvider::new(user_store.clone(), client.clone(), cx),
cx,
);
} else {
registry.unregister_provider(
LanguageModelProviderId::from(ZED_CLOUD_PROVIDER_ID.to_string()),
cx,
);
}
});
})
.detach();
}

crates/language_models/src/provider.rs

@@ -1,8 +1,6 @@
pub mod anthropic;
pub mod cloud;
pub mod copilot_chat;
#[cfg(any(test, feature = "test-support"))]
pub mod fake;
pub mod google;
pub mod ollama;
pub mod open_ai;

crates/language_models/src/provider/anthropic.rs

@@ -1,9 +1,4 @@
use crate::{
settings::AllLanguageModelSettings, LanguageModel, LanguageModelCacheConfiguration,
LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
};
use crate::{LanguageModelCompletionEvent, LanguageModelToolUse, StopReason};
use crate::AllLanguageModelSettings;
use anthropic::{AnthropicError, ContentDelta, Event, ResponseContent};
use anyhow::{anyhow, Context as _, Result};
use collections::{BTreeMap, HashMap};
@@ -15,6 +10,12 @@ use gpui::{
View, WhiteSpace,
};
use http_client::HttpClient;
use language_model::{
LanguageModel, LanguageModelCacheConfiguration, LanguageModelId, LanguageModelName,
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
};
use language_model::{LanguageModelCompletionEvent, LanguageModelToolUse, StopReason};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
@@ -256,7 +257,7 @@ pub fn count_anthropic_tokens(
let mut string_messages = Vec::with_capacity(messages.len());
for message in messages {
use crate::MessageContent;
use language_model::MessageContent;
let mut string_contents = String::new();

crates/language_models/src/provider/cloud.rs

@@ -1,10 +1,4 @@
use super::open_ai::count_open_ai_tokens;
use crate::provider::anthropic::map_to_language_model_completion_events;
use crate::{
settings::AllLanguageModelSettings, CloudModel, LanguageModel, LanguageModelCacheConfiguration,
LanguageModelId, LanguageModelName, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, RateLimiter,
};
use anthropic::AnthropicError;
use anyhow::{anyhow, Result};
use client::{
@@ -22,6 +16,14 @@ use gpui::{
ModelContext, ReadGlobal, Subscription, Task,
};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
CloudModel, LanguageModel, LanguageModelCacheConfiguration, LanguageModelId, LanguageModelName,
LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
LanguageModelRequest, RateLimiter, ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider,
};
use proto::TypedEnvelope;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
@@ -40,11 +42,11 @@ use strum::IntoEnumIterator;
use thiserror::Error;
use ui::{prelude::*, TintColor};
use crate::{LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider};
use crate::provider::anthropic::map_to_language_model_completion_events;
use crate::AllLanguageModelSettings;
use super::anthropic::count_anthropic_tokens;
pub const PROVIDER_ID: &str = "zed.dev";
pub const PROVIDER_NAME: &str = "Zed";
const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
@@ -255,7 +257,7 @@ impl LanguageModelProviderState for CloudLanguageModelProvider {
impl LanguageModelProvider for CloudLanguageModelProvider {
fn id(&self) -> LanguageModelProviderId {
LanguageModelProviderId(PROVIDER_ID.into())
LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
}
fn name(&self) -> LanguageModelProviderName {
@@ -535,7 +537,7 @@ impl LanguageModel for CloudLanguageModel {
}
fn provider_id(&self) -> LanguageModelProviderId {
LanguageModelProviderId(PROVIDER_ID.into())
LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
}
fn provider_name(&self) -> LanguageModelProviderName {

crates/language_models/src/provider/copilot_chat.rs

@@ -14,6 +14,11 @@ use gpui::{
percentage, svg, Animation, AnimationExt, AnyView, AppContext, AsyncAppContext, Model, Render,
Subscription, Task, Transformation,
};
use language_model::{
LanguageModel, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
};
use settings::SettingsStore;
use std::time::Duration;
use strum::IntoEnumIterator;
@@ -23,12 +28,6 @@ use ui::{
ViewContext, VisualContext, WindowContext,
};
use crate::{
LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
LanguageModelProviderId, LanguageModelProviderName, LanguageModelRequest, RateLimiter, Role,
};
use crate::{LanguageModelCompletionEvent, LanguageModelProviderState};
use super::anthropic::count_anthropic_tokens;
use super::open_ai::count_open_ai_tokens;

crates/language_models/src/provider/google.rs

@@ -8,6 +8,12 @@ use gpui::{
View, WhiteSpace,
};
use http_client::HttpClient;
use language_model::LanguageModelCompletionEvent;
use language_model::{
LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
LanguageModelRequest, RateLimiter,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
@@ -17,12 +23,7 @@ use theme::ThemeSettings;
use ui::{prelude::*, Icon, IconName, Tooltip};
use util::ResultExt;
use crate::LanguageModelCompletionEvent;
use crate::{
settings::AllLanguageModelSettings, LanguageModel, LanguageModelId, LanguageModelName,
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, RateLimiter,
};
use crate::AllLanguageModelSettings;
const PROVIDER_ID: &str = "google";
const PROVIDER_NAME: &str = "Google AI";

crates/language_models/src/provider/ollama.rs

@@ -2,6 +2,12 @@ use anyhow::{anyhow, bail, Result};
use futures::{future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
use gpui::{AnyView, AppContext, AsyncAppContext, ModelContext, Subscription, Task};
use http_client::HttpClient;
use language_model::LanguageModelCompletionEvent;
use language_model::{
LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
LanguageModelRequest, RateLimiter, Role,
};
use ollama::{
get_models, preload_model, stream_chat_completion, ChatMessage, ChatOptions, ChatRequest,
ChatResponseDelta, KeepAlive, OllamaToolCall,
@@ -13,12 +19,7 @@ use std::{collections::BTreeMap, sync::Arc};
use ui::{prelude::*, ButtonLike, Indicator};
use util::ResultExt;
use crate::LanguageModelCompletionEvent;
use crate::{
settings::AllLanguageModelSettings, LanguageModel, LanguageModelId, LanguageModelName,
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
};
use crate::AllLanguageModelSettings;
const OLLAMA_DOWNLOAD_URL: &str = "https://ollama.com/download";
const OLLAMA_LIBRARY_URL: &str = "https://ollama.com/library";

crates/language_models/src/provider/open_ai.rs

@@ -7,6 +7,11 @@ use gpui::{
View, WhiteSpace,
};
use http_client::HttpClient;
use language_model::{
LanguageModel, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
};
use open_ai::{
stream_completion, FunctionDefinition, ResponseStreamEvent, ToolChoice, ToolDefinition,
};
@@ -19,12 +24,7 @@ use theme::ThemeSettings;
use ui::{prelude::*, Icon, IconName, Tooltip};
use util::ResultExt;
use crate::LanguageModelCompletionEvent;
use crate::{
settings::AllLanguageModelSettings, LanguageModel, LanguageModelId, LanguageModelName,
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
};
use crate::AllLanguageModelSettings;
const PROVIDER_ID: &str = "openai";
const PROVIDER_NAME: &str = "OpenAI";

crates/language_models/src/settings.rs

@@ -2,22 +2,20 @@ use std::sync::Arc;
use anyhow::Result;
use gpui::AppContext;
use language_model::LanguageModelCacheConfiguration;
use project::Fs;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{update_settings_file, Settings, SettingsSources};
use crate::{
provider::{
self,
anthropic::AnthropicSettings,
cloud::{self, ZedDotDevSettings},
copilot_chat::CopilotChatSettings,
google::GoogleSettings,
ollama::OllamaSettings,
open_ai::OpenAiSettings,
},
LanguageModelCacheConfiguration,
use crate::provider::{
self,
anthropic::AnthropicSettings,
cloud::{self, ZedDotDevSettings},
copilot_chat::CopilotChatSettings,
google::GoogleSettings,
ollama::OllamaSettings,
open_ai::OpenAiSettings,
};
/// Initializes the language model settings.

crates/zed/Cargo.toml

@@ -61,6 +61,7 @@ install_cli.workspace = true
journal.workspace = true
language.workspace = true
language_model.workspace = true
language_models.workspace = true
language_selector.workspace = true
language_tools.workspace = true
languages = { workspace = true, features = ["load-grammars"] }

crates/zed/src/main.rs

@@ -387,7 +387,8 @@ fn main() {
cx,
);
supermaven::init(app_state.client.clone(), cx);
language_model::init(
language_model::init(cx);
language_models::init(
app_state.user_store.clone(),
app_state.client.clone(),
app_state.fs.clone(),

crates/zed/src/zed.rs

@@ -3504,7 +3504,8 @@ mod tests {
app_state.client.http_client().clone(),
cx,
);
language_model::init(
language_model::init(cx);
language_models::init(
app_state.user_store.clone(),
app_state.client.clone(),
app_state.fs.clone(),