Skip to content

Commit

Permalink
No token passed by default in gr.load() (#9069)
Browse files Browse the repository at this point in the history
* changes

* add changeset

* docstring

* change

* client changes

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
  • Loading branch information
abidlabs and gradio-pr-bot authored Aug 12, 2024
1 parent 370e01a commit f9f84bf
Showing 3 changed files with 23 additions and 18 deletions.
5 changes: 5 additions & 0 deletions .changeset/beige-houses-shine.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"gradio": minor
---

feat:No token passed by default in `gr.load()`
24 changes: 12 additions & 12 deletions client/python/gradio_client/client.py
Original file line number Diff line number Diff line change
@@ -75,7 +75,7 @@ class Client:
def __init__(
self,
src: str,
hf_token: str | None = None,
hf_token: str | Literal[False] | None = False,
max_workers: int = 40,
verbose: bool = True,
auth: tuple[str, str] | None = None,
@@ -88,14 +88,14 @@ def __init__(
):
"""
Parameters:
src: Either the name of the Hugging Face Space to load, (e.g. "abidlabs/whisper-large-v2") or the full URL (including "http" or "https") of the hosted Gradio app to load (e.g. "http://mydomain.com/app" or "https://bec81a83-5b5c-471e.gradio.live/").
hf_token: The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token
max_workers: The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.
verbose: Whether the client should print statements to the console.
headers: Additional headers to send to the remote Gradio app on every request. By default only the HF authorization and user-agent headers are sent. This parameter will override the default headers if they have the same keys.
download_files: Directory where the client should download output files on the local machine from the remote API. By default, uses the value of the GRADIO_TEMP_DIR environment variable which, if not set by the user, is a temporary directory on your machine. If False, the client does not download files and returns a FileData dataclass object with the filepath on the remote machine instead.
ssl_verify: If False, skips certificate validation which allows the client to connect to Gradio apps that are using self-signed certificates.
httpx_kwargs: Additional keyword arguments to pass to `httpx.Client`, `httpx.stream`, `httpx.get` and `httpx.post`. This can be used to set timeouts, proxies, http auth, etc.
src: either the name of the Hugging Face Space to load, (e.g. "abidlabs/whisper-large-v2") or the full URL (including "http" or "https") of the hosted Gradio app to load (e.g. "http://mydomain.com/app" or "https://bec81a83-5b5c-471e.gradio.live/").
hf_token: optional Hugging Face token to use to access private Spaces. By default, no token is sent to the server. Set `hf_token=None` to use the locally saved token if there is one (warning: only provide a token if you are loading a trusted private Space as the token can be read by the Space you are loading). Find your tokens here: https://huggingface.co/settings/tokens.
max_workers: maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.
verbose: whether the client should print statements to the console.
headers: additional headers to send to the remote Gradio app on every request. By default only the HF authorization and user-agent headers are sent. This parameter will override the default headers if they have the same keys.
download_files: directory where the client should download output files on the local machine from the remote API. By default, uses the value of the GRADIO_TEMP_DIR environment variable which, if not set by the user, is a temporary directory on your machine. If False, the client does not download files and returns a FileData dataclass object with the filepath on the remote machine instead.
ssl_verify: if False, skips certificate validation which allows the client to connect to Gradio apps that are using self-signed certificates.
httpx_kwargs: additional keyword arguments to pass to `httpx.Client`, `httpx.stream`, `httpx.get` and `httpx.post`. This can be used to set timeouts, proxies, http auth, etc.
"""
self.verbose = verbose
self.hf_token = hf_token
@@ -322,7 +322,7 @@ def duplicate(
cls,
from_id: str,
to_id: str | None = None,
hf_token: str | None = None,
hf_token: str | Literal[False] | None = False,
private: bool = True,
hardware: Literal[
"cpu-basic",
@@ -355,7 +355,7 @@ def duplicate(
Parameters:
from_id: The name of the Hugging Face Space to duplicate in the format "{username}/{space_id}", e.g. "gradio/whisper".
to_id: The name of the new Hugging Face Space to create, e.g. "abidlabs/whisper-duplicate". If not provided, the new Space will be named "{your_HF_username}/{space_id}".
hf_token: The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token
hf_token: optional Hugging Face token to use to duplicate private Spaces. By default, no token is sent to the server. Set `hf_token=None` to use the locally saved token if there is one. Find your tokens here: https://huggingface.co/settings/tokens.
private: Whether the new Space should be private (True) or public (False). Defaults to True.
hardware: The hardware tier to use for the new Space. Defaults to the same hardware tier as the original Space. Options include "cpu-basic", "cpu-upgrade", "t4-small", "t4-medium", "a10g-small", "a10g-large", "a100-large", subject to availability.
secrets: A dictionary of (secret key, secret value) to pass to the new Space. Defaults to None. Secrets are only used when the Space is duplicated for the first time, and are not updated if the duplicated Space already exists.
@@ -890,7 +890,7 @@ def deploy_discord(
discord_bot_token: str | None = None,
api_names: list[str | tuple[str, str]] | None = None,
to_id: str | None = None,
hf_token: str | None = None,
hf_token: str | Literal[False] | None = False,
private: bool = False,
):
"""
12 changes: 6 additions & 6 deletions gradio/external.py
Original file line number Diff line number Diff line change
@@ -37,7 +37,7 @@
def load(
name: str,
src: str | None = None,
hf_token: str | Literal[False] | None = None,
hf_token: str | Literal[False] | None = False,
alias: str | None = None,
**kwargs,
) -> Blocks:
@@ -48,7 +48,7 @@ def load(
Parameters:
name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
hf_token: optional access token for loading private Hugging Face Hub models or spaces. Will default to the locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide a token if you are loading a trusted private Space as it can be read by the Space you are loading.
hf_token: optional Hugging Face token for loading private models or Spaces. By default, no token is sent to the server; set `hf_token=None` to use the locally saved token if there is one (warning: when loading Spaces, only provide a token if you are loading a trusted private Space as the token can be read by the Space you are loading). Find your tokens here: https://huggingface.co/settings/tokens.
alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
Returns:
a Gradio Blocks object for the given model
@@ -65,7 +65,7 @@ def load(
def load_blocks_from_repo(
name: str,
src: str | None = None,
hf_token: str | Literal[False] | None = None,
hf_token: str | Literal[False] | None = False,
alias: str | None = None,
**kwargs,
) -> Blocks:
@@ -397,7 +397,7 @@ def query_huggingface_inference_endpoints(*data):


def from_spaces(
space_name: str, hf_token: str | None, alias: str | None, **kwargs
space_name: str, hf_token: str | None | Literal[False], alias: str | None, **kwargs
) -> Blocks:
space_url = f"https://huggingface.co/spaces/{space_name}"

@@ -444,7 +444,7 @@ def from_spaces(
return from_spaces_blocks(space=space_name, hf_token=hf_token)


def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
def from_spaces_blocks(space: str, hf_token: str | None | Literal[False]) -> Blocks:
client = Client(
space,
hf_token=hf_token,
@@ -479,7 +479,7 @@ def from_spaces_interface(
model_name: str,
config: dict,
alias: str | None,
hf_token: str | None,
hf_token: str | None | Literal[False],
iframe_url: str,
**kwargs,
) -> Interface:

0 comments on commit f9f84bf

Please sign in to comment.