feat!: replace with_authentication with new from_env method
As the SaaS API offering is gone, we can no longer assume a default base URL.
moldhouse committed Dec 10, 2024
1 parent cab333a commit d2da859
Showing 6 changed files with 26 additions and 25 deletions.
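
For downstream code, the breaking change amounts to swapping one constructor call. A minimal before/after sketch (error handling via `unwrap` as in the crate's own examples; the variable names come from `.env.example` below):

// Before this commit (SaaS base URL implied):
let client = Client::with_authentication("AA_API_TOKEN").unwrap();

// After this commit (both AA_API_TOKEN and AA_BASE_URL must be provided,
// e.g. via the environment or a .env file):
let client = Client::from_env().unwrap();

Because `from_env` loads a `.env` file at runtime, `dotenv` also moves from the dev-dependencies into the regular dependencies in `Cargo.toml`.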
2 changes: 2 additions & 0 deletions .env.example
@@ -0,0 +1,2 @@
+AA_API_TOKEN=
+AA_BASE_URL=
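
A populated `.env` for local development follows the same keys; for instance (both values are placeholders, not a real token or endpoint):

AA_API_TOKEN=dummy-token-123
AA_BASE_URL=https://inference.example.com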
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -13,6 +13,7 @@ categories = ["api-bindings"]
 [dependencies]
 async-stream = "0.3.6"
 base64 = "0.22.0"
+dotenv = "0.15.0"
 futures-util = "0.3.31"
 image = "0.25.1"
 itertools = "0.13.0"
@@ -26,6 +27,5 @@ tokenizers = { version = "0.21.0", default-features = false, features = [
 ] }
 
 [dev-dependencies]
-dotenv = "0.15.0"
 tokio = { version = "1.37.0", features = ["rt", "macros"] }
 wiremock = "0.6.0"
2 changes: 1 addition & 1 deletion README.md
@@ -10,7 +10,7 @@ use aleph_alpha_client::{Client, TaskCompletion, How, Task};
 #[tokio::main]
 async fn main() {
     // Authenticate against API. Fetches token.
-    let client = Client::with_authentication("AA_API_TOKEN").unwrap();
+    let client = Client::from_env().unwrap();
 
     // Name of the model we want to use. Large models usually give better answers, but are also
     // more costly.
2 changes: 1 addition & 1 deletion src/http.rs
@@ -139,7 +139,7 @@ impl HttpClient {
     ///
     /// async fn print_completion() -> Result<(), Error> {
     ///     // Authenticate against API. Fetches token.
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of the model we want to use. Large models usually give better answers, but are
     ///     // also slower and more costly.
38 changes: 20 additions & 18 deletions src/lib.rs
@@ -6,7 +6,7 @@
 //! #[tokio::main(flavor = "current_thread")]
 //! async fn main() {
 //!     // Authenticate against API. Fetches token.
-//!     let client = Client::with_authentication("AA_API_TOKEN").unwrap();
+//!     let client = Client::from_env().unwrap();
 //!
 //!     // Name of the model we want to use. Large models usually give better answers, but are also
 //!     // more costly.
@@ -33,11 +33,12 @@ mod prompt;
 mod semantic_embedding;
 mod stream;
 mod tokenization;
-use std::{pin::Pin, time::Duration};
-
+use dotenv::dotenv;
 use futures_util::Stream;
 use http::HttpClient;
 use semantic_embedding::{BatchSemanticEmbeddingOutput, SemanticEmbeddingOutput};
+use std::env;
+use std::{pin::Pin, time::Duration};
 use tokenizers::Tokenizer;
 
 pub use self::{
@@ -70,8 +71,7 @@ pub struct Client {
 
 impl Client {
     /// A new instance of an Aleph Alpha client helping you interact with the Aleph Alpha API.
-    /// For "normal" client applications you may likely rather use [`Self::with_authentication`] or
-    /// [`Self::with_base_url`].
+    /// For "normal" client applications you will likely rather use [`Self::with_base_url`].
     ///
     /// You may want to only use request-based authentication and skip default authentication. This
     /// is useful if writing an application which invokes the client on behalf of many different
@@ -82,11 +82,6 @@ impl Client {
         Ok(Self { http_client })
     }
 
-    /// Use the Aleph Alpha SaaS offering with your API token for all requests.
-    pub fn with_authentication(api_token: impl Into<String>) -> Result<Self, Error> {
-        Self::with_base_url("https://api.aleph-alpha.com", api_token)
-    }
-
     /// Use your on-premise inference with your API token for all requests.
     ///
     /// In production you typically would want to set this to <https://api.aleph-alpha.com>. Yet
@@ -98,14 +93,21 @@ impl Client {
         Self::new(host, Some(api_token.into()))
     }
 
+    pub fn from_env() -> Result<Self, Error> {
+        let _ = dotenv();
+        let api_token = env::var("AA_API_TOKEN").unwrap();
+        let base_url = env::var("AA_BASE_URL").unwrap();
+        Self::with_base_url(base_url, api_token)
+    }
+
     /// Execute a task with the Aleph Alpha API and fetch its result.
     ///
     /// ```no_run
     /// use aleph_alpha_client::{Client, How, TaskCompletion, Error};
     ///
     /// async fn print_completion() -> Result<(), Error> {
     ///     // Authenticate against API. Fetches token.
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of the model we want to use. Large models usually give better answers, but are
     ///     // also slower and more costly.
@@ -169,7 +171,7 @@ impl Client {
     ///
     /// async fn print_completion() -> Result<(), Error> {
     ///     // Authenticate against API. Fetches token.
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of the model we want to use. Large models usually give better answers, but are
     ///     // also slower and more costly.
@@ -207,7 +209,7 @@ impl Client {
     ///
     /// async fn print_stream_completion() -> Result<(), Error> {
     ///     // Authenticate against API. Fetches token.
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of the model we want to use. Large models usually give better answers, but are
     ///     // also slower and more costly.
@@ -244,7 +246,7 @@ impl Client {
     ///
     /// async fn print_chat() -> Result<(), Error> {
     ///     // Authenticate against API. Fetches token.
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of a model that supports chat.
     ///     let model = "pharia-1-llm-7b-control";
@@ -279,7 +281,7 @@ impl Client {
     ///
     /// async fn print_stream_chat() -> Result<(), Error> {
     ///     // Authenticate against API. Fetches token.
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of a model that supports chat.
     ///     let model = "pharia-1-llm-7b-control";
@@ -315,7 +317,7 @@ impl Client {
     /// use aleph_alpha_client::{Client, How, TaskCompletion, Task, Error, Granularity, TaskExplanation, Stopping, Prompt, Sampling};
     ///
     /// async fn print_explanation() -> Result<(), Error> {
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of the model we want to use. Large models usually give better answers, but are
     ///     // also slower and more costly.
@@ -359,7 +361,7 @@ impl Client {
     /// use aleph_alpha_client::{Client, Error, How, TaskTokenization};
     ///
     /// async fn tokenize() -> Result<(), Error> {
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Name of the model for which we want to tokenize text.
     ///     let model = "luminous-base";
@@ -395,7 +397,7 @@ impl Client {
     /// use aleph_alpha_client::{Client, Error, How, TaskDetokenization};
     ///
     /// async fn detokenize() -> Result<(), Error> {
-    ///     let client = Client::with_authentication("AA_API_TOKEN")?;
+    ///     let client = Client::from_env()?;
     ///
     ///     // Specify the name of the model whose tokenizer was used to generate the input token ids.
     ///     let model = "luminous-base";
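
Taken together, a minimal binary using the new constructor looks roughly like this (a sketch only; note that `from_env` as committed panics via `unwrap` instead of returning `Err` when a variable is unset):

use aleph_alpha_client::Client;

fn main() {
    // Loads a `.env` file if present, then reads AA_API_TOKEN and AA_BASE_URL.
    // Panics if either variable is missing.
    let client = Client::from_env().unwrap();

    // Hand `client` to an async runtime and submit tasks from there.
    let _ = client;
}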
5 changes: 1 addition & 4 deletions src/prompt.rs
@@ -87,10 +87,7 @@ impl<'a> Modality<'a> {
     /// #[tokio::main(flavor = "current_thread")]
     /// async fn main() {
     ///     // Create client
-    ///     let _ = dotenv();
-    ///     let aa_api_token = std::env::var("AA_API_TOKEN")
-    ///         .expect("AA_API_TOKEN environment variable must be specified to run demo.");
-    ///     let client = Client::with_authentication(aa_api_token).unwrap();
+    ///     let client = Client::from_env().unwrap();
     ///     // Define task
     ///     let task = TaskCompletion {
     ///         prompt: Prompt::from_vec(vec![
