diff --git a/.changeset/beige-houses-shine.md b/.changeset/beige-houses-shine.md new file mode 100644 index 0000000000000..8444e14f5e531 --- /dev/null +++ b/.changeset/beige-houses-shine.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:No token passed by default in `gr.load()` diff --git a/.changeset/blue-zoos-marry.md b/.changeset/blue-zoos-marry.md new file mode 100644 index 0000000000000..6cb830aae59ed --- /dev/null +++ b/.changeset/blue-zoos-marry.md @@ -0,0 +1,15 @@ +--- +"@gradio/atoms": minor +"@gradio/button": minor +"@gradio/checkboxgroup": minor +"@gradio/datetime": minor +"@gradio/dropdown": minor +"@gradio/form": minor +"@gradio/multimodaltextbox": minor +"@gradio/number": minor +"@gradio/radio": minor +"@gradio/textbox": minor +"gradio": minor +--- + +feat:Adding new themes to Gradio 5.0 diff --git a/.changeset/breezy-olives-wonder.md b/.changeset/breezy-olives-wonder.md new file mode 100644 index 0000000000000..3473bae75c72d --- /dev/null +++ b/.changeset/breezy-olives-wonder.md @@ -0,0 +1,6 @@ +--- +"@gradio/chatbot": minor +"gradio": minor +--- + +feat:Small tweak to how thoughts are shown in `gr.Chatbot` diff --git a/.changeset/bright-apes-fly.md b/.changeset/bright-apes-fly.md new file mode 100644 index 0000000000000..9c2489268e0c1 --- /dev/null +++ b/.changeset/bright-apes-fly.md @@ -0,0 +1,7 @@ +--- +"@gradio/audio": minor +"gradio": minor +"website": minor +--- + +feat:Start/stop recoding from the backend. 
Add guide on conversational chatbots diff --git a/.changeset/bright-garlics-melt.md b/.changeset/bright-garlics-melt.md new file mode 100644 index 0000000000000..ec3c8247b3f72 --- /dev/null +++ b/.changeset/bright-garlics-melt.md @@ -0,0 +1,9 @@ +--- +"@gradio/core": minor +"@self/app": minor +"@self/spa": minor +"@self/tootils": minor +"gradio": minor +--- + +feat:SSR e2e + fixes diff --git a/.changeset/brown-queens-begin.md b/.changeset/brown-queens-begin.md new file mode 100644 index 0000000000000..bd67e57266840 --- /dev/null +++ b/.changeset/brown-queens-begin.md @@ -0,0 +1,9 @@ +--- +"@gradio/lite": minor +"@gradio/tabitem": minor +"@gradio/tabs": minor +"gradio": minor +"website": minor +--- + +feat:Playground requirements tab diff --git a/.changeset/busy-jokes-sit.md b/.changeset/busy-jokes-sit.md new file mode 100644 index 0000000000000..299e0a7e2a333 --- /dev/null +++ b/.changeset/busy-jokes-sit.md @@ -0,0 +1,8 @@ +--- +"@gradio/icons": minor +"@gradio/multimodaltextbox": minor +"@gradio/textbox": minor +"gradio": minor +--- + +feat:Built-in submit and stop buttons in `gr.ChatInterface(multimodal=False)`, adding `submit_btn` and `stop_btn` props to `gr.Textbox()` and `gr.MultimodalText()` diff --git a/.changeset/busy-lizards-heal.md b/.changeset/busy-lizards-heal.md new file mode 100644 index 0000000000000..e95bd65e24277 --- /dev/null +++ b/.changeset/busy-lizards-heal.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Fix most flaky Python tests in `5.0-dev` branch diff --git a/.changeset/busy-tools-chew.md b/.changeset/busy-tools-chew.md new file mode 100644 index 0000000000000..d79fcff9695b1 --- /dev/null +++ b/.changeset/busy-tools-chew.md @@ -0,0 +1,9 @@ +--- +"@gradio/chatbot": minor +"@gradio/icons": minor +"@gradio/imageeditor": minor +"@gradio/statustracker": minor +"gradio": minor +--- + +feat:Move buttons from chat_interface into Chatbot diff --git a/.changeset/calm-jobs-hope.md b/.changeset/calm-jobs-hope.md new file 
mode 100644 index 0000000000000..d30df1d87d00d --- /dev/null +++ b/.changeset/calm-jobs-hope.md @@ -0,0 +1,7 @@ +--- +"@gradio/tabitem": minor +"@gradio/tabs": minor +"gradio": minor +--- + +feat:Redesign `gr.Tabs()` diff --git a/.changeset/calm-monkeys-argue.md b/.changeset/calm-monkeys-argue.md new file mode 100644 index 0000000000000..a1c24afe681b7 --- /dev/null +++ b/.changeset/calm-monkeys-argue.md @@ -0,0 +1,18 @@ +--- +"@gradio/annotatedimage": minor +"@gradio/atoms": minor +"@gradio/audio": minor +"@gradio/file": minor +"@gradio/gallery": minor +"@gradio/image": minor +"@gradio/imageeditor": minor +"@gradio/json": minor +"@gradio/model3d": minor +"@gradio/simpleimage": minor +"@gradio/upload": minor +"@gradio/video": minor +"@gradio/wasm": minor +"gradio": minor +--- + +feat:Improve Icon Button consistency diff --git a/.changeset/calm-planets-send.md b/.changeset/calm-planets-send.md new file mode 100644 index 0000000000000..b8de4df88a17e --- /dev/null +++ b/.changeset/calm-planets-send.md @@ -0,0 +1,7 @@ +--- +"@gradio/core": minor +"@self/spa": minor +"gradio": minor +--- + +feat:Fix reload mode and streaming in 5.0 dev diff --git a/.changeset/chatty-houses-do.md b/.changeset/chatty-houses-do.md new file mode 100644 index 0000000000000..73b4a7af4ff92 --- /dev/null +++ b/.changeset/chatty-houses-do.md @@ -0,0 +1,6 @@ +--- +"@gradio/markdown": minor +"gradio": minor +--- + +feat:Use `container` param in `gr.Markdown` diff --git a/.changeset/chilly-dragons-smoke.md b/.changeset/chilly-dragons-smoke.md new file mode 100644 index 0000000000000..7df9c33aedbdf --- /dev/null +++ b/.changeset/chilly-dragons-smoke.md @@ -0,0 +1,7 @@ +--- +"@gradio/column": minor +"@gradio/row": minor +"gradio": minor +--- + +feat:Equal height columns diff --git a/.changeset/chilly-jars-sip.md b/.changeset/chilly-jars-sip.md new file mode 100644 index 0000000000000..570168c8e7b3c --- /dev/null +++ b/.changeset/chilly-jars-sip.md @@ -0,0 +1,6 @@ +--- +"@gradio/colorpicker": minor 
+"gradio": minor +--- + +feat:Update gr.ColorPicker UI diff --git a/.changeset/chilly-places-sniff.md b/.changeset/chilly-places-sniff.md new file mode 100644 index 0000000000000..5d02eeb9557db --- /dev/null +++ b/.changeset/chilly-places-sniff.md @@ -0,0 +1,6 @@ +--- +"@gradio/slider": minor +"gradio": minor +--- + +feat:Redesign `gr.Slider()` diff --git a/.changeset/chilly-socks-poke.md b/.changeset/chilly-socks-poke.md new file mode 100644 index 0000000000000..85f9336838641 --- /dev/null +++ b/.changeset/chilly-socks-poke.md @@ -0,0 +1,7 @@ +--- +"@gradio/code": minor +"@gradio/icons": minor +"gradio": minor +--- + +feat:Improve `gr.Code` diff --git a/.changeset/chubby-hairs-beam.md b/.changeset/chubby-hairs-beam.md new file mode 100644 index 0000000000000..78e3986753b57 --- /dev/null +++ b/.changeset/chubby-hairs-beam.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Drop python 3.8 and 3.9 diff --git a/.changeset/clean-pigs-arrive.md b/.changeset/clean-pigs-arrive.md new file mode 100644 index 0000000000000..e67535c90ddad --- /dev/null +++ b/.changeset/clean-pigs-arrive.md @@ -0,0 +1,6 @@ +--- +"@gradio/atoms": minor +"gradio": minor +--- + +feat:Centre components within `Block` when height and width are set diff --git a/.changeset/cold-lies-mate.md b/.changeset/cold-lies-mate.md new file mode 100644 index 0000000000000..04757a87d7a48 --- /dev/null +++ b/.changeset/cold-lies-mate.md @@ -0,0 +1,7 @@ +--- +"@gradio/button": minor +"@gradio/group": minor +"gradio": minor +--- + +feat:Redesign `gr.Button()` diff --git a/.changeset/cruel-heads-hunt.md b/.changeset/cruel-heads-hunt.md new file mode 100644 index 0000000000000..9f8a50692a018 --- /dev/null +++ b/.changeset/cruel-heads-hunt.md @@ -0,0 +1,6 @@ +--- +"@gradio/button": patch +"gradio": patch +--- + +fix:Center icon in button when no text is present diff --git a/.changeset/cuddly-queens-melt.md b/.changeset/cuddly-queens-melt.md new file mode 100644 index 
0000000000000..69070def8ec18 --- /dev/null +++ b/.changeset/cuddly-queens-melt.md @@ -0,0 +1,8 @@ +--- +"@gradio/chatbot": minor +"@gradio/client": minor +"@gradio/core": minor +"gradio": minor +--- + +feat:Disable liking user message in chatbot by default but make it configurable diff --git a/.changeset/cute-rules-write.md b/.changeset/cute-rules-write.md new file mode 100644 index 0000000000000..98ae2917b9b6d --- /dev/null +++ b/.changeset/cute-rules-write.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Remove two dependencies: `importlib_resources` and `urllib3` (if not in Wasm) diff --git a/.changeset/cyan-spies-check.md b/.changeset/cyan-spies-check.md new file mode 100644 index 0000000000000..0259a9f252cdc --- /dev/null +++ b/.changeset/cyan-spies-check.md @@ -0,0 +1,5 @@ +--- +"gradio": patch +--- + +feat:Improve url downloads for file objects diff --git a/.changeset/dark-cougars-fold.md b/.changeset/dark-cougars-fold.md new file mode 100644 index 0000000000000..3c3288fce0325 --- /dev/null +++ b/.changeset/dark-cougars-fold.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Prevent invalid values from being submitted to dropdown, etc. 
diff --git a/.changeset/dark-moose-shine.md b/.changeset/dark-moose-shine.md new file mode 100644 index 0000000000000..8f2b3da9bb89e --- /dev/null +++ b/.changeset/dark-moose-shine.md @@ -0,0 +1,8 @@ +--- +"@gradio/chatbot": minor +"@gradio/multimodaltextbox": minor +"@gradio/textbox": minor +"gradio": minor +--- + +feat:Chatbot Examples diff --git a/.changeset/deep-bananas-switch.md b/.changeset/deep-bananas-switch.md new file mode 100644 index 0000000000000..29005cbc79702 --- /dev/null +++ b/.changeset/deep-bananas-switch.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"website": minor +--- + +feat:Deprecate type='tuples for chatbot and focus chatbot docs on 'messages' type diff --git a/.changeset/deep-memes-cheat.md b/.changeset/deep-memes-cheat.md new file mode 100644 index 0000000000000..429d9b4dd69ad --- /dev/null +++ b/.changeset/deep-memes-cheat.md @@ -0,0 +1,7 @@ +--- +"@gradio/client": patch +"@gradio/core": patch +"gradio": patch +--- + +fix:Trigger state change event on iterators diff --git a/.changeset/deep-ways-wink.md b/.changeset/deep-ways-wink.md new file mode 100644 index 0000000000000..abb1b36134981 --- /dev/null +++ b/.changeset/deep-ways-wink.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Postprocess hardening diff --git a/.changeset/dirty-lions-follow.md b/.changeset/dirty-lions-follow.md new file mode 100644 index 0000000000000..e1d472603a3d1 --- /dev/null +++ b/.changeset/dirty-lions-follow.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Expanding AI Playground Prompt for Qwen diff --git a/.changeset/dirty-pugs-hunt.md b/.changeset/dirty-pugs-hunt.md new file mode 100644 index 0000000000000..19924d0c8516b --- /dev/null +++ b/.changeset/dirty-pugs-hunt.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Adds TLS to FRP tunnel diff --git a/.changeset/dry-frogs-argue.md b/.changeset/dry-frogs-argue.md new file mode 100644 index 0000000000000..bde50194330b0 --- /dev/null +++ b/.changeset/dry-frogs-argue.md @@ -0,0 +1,5 @@ +--- 
+"gradio": minor +--- + +feat:Minor changes to flagging for 5.0 diff --git a/.changeset/dull-plants-trade.md b/.changeset/dull-plants-trade.md new file mode 100644 index 0000000000000..9a6649bb3adfb --- /dev/null +++ b/.changeset/dull-plants-trade.md @@ -0,0 +1,11 @@ +--- +"@gradio/audio": minor +"@gradio/core": minor +"@gradio/file": minor +"@gradio/image": minor +"@gradio/model3d": minor +"@gradio/video": minor +"gradio": minor +--- + +feat:Adds ability to block event trigger when file is uploading diff --git a/.changeset/easy-files-serve.md b/.changeset/easy-files-serve.md new file mode 100644 index 0000000000000..f8a389d5c3606 --- /dev/null +++ b/.changeset/easy-files-serve.md @@ -0,0 +1,10 @@ +--- +"@gradio/audio": minor +"@gradio/client": minor +"@gradio/core": minor +"@gradio/icons": minor +"@gradio/image": minor +"gradio": minor +--- + +feat:Open audio/image input stream only when queue is ready diff --git a/.changeset/easy-snakes-arrive.md b/.changeset/easy-snakes-arrive.md new file mode 100644 index 0000000000000..bd8b95b967ae5 --- /dev/null +++ b/.changeset/easy-snakes-arrive.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Equal height in row false by default diff --git a/.changeset/every-breads-float.md b/.changeset/every-breads-float.md new file mode 100644 index 0000000000000..c22bfe55199fa --- /dev/null +++ b/.changeset/every-breads-float.md @@ -0,0 +1,9 @@ +--- +"@gradio/atoms": minor +"@gradio/chatbot": minor +"@gradio/icons": minor +"@gradio/statustracker": minor +"gradio": minor +--- + +feat:Move icons into `IconButtonWrapper` diff --git a/.changeset/every-candies-sell.md b/.changeset/every-candies-sell.md new file mode 100644 index 0000000000000..efba39dbbbe92 --- /dev/null +++ b/.changeset/every-candies-sell.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Improve is_in_or_equal and fuzzer diff --git a/.changeset/every-geese-shave.md b/.changeset/every-geese-shave.md new file mode 100644 index 0000000000000..e36e996fad2ea --- 
/dev/null +++ b/.changeset/every-geese-shave.md @@ -0,0 +1,8 @@ +--- +"@gradio/core": minor +"@gradio/image": minor +"@gradio/utils": minor +"gradio": minor +--- + +feat:Streaming Guides diff --git a/.changeset/evil-clocks-visit.md b/.changeset/evil-clocks-visit.md new file mode 100644 index 0000000000000..c31c3eb685db4 --- /dev/null +++ b/.changeset/evil-clocks-visit.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Only move files to the cache that have a meta key diff --git a/.changeset/fancy-pianos-dig.md b/.changeset/fancy-pianos-dig.md new file mode 100644 index 0000000000000..291793f193082 --- /dev/null +++ b/.changeset/fancy-pianos-dig.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Enhance Lite E2E tests and fix a networking problem on Lite diff --git a/.changeset/fast-trams-grin.md b/.changeset/fast-trams-grin.md new file mode 100644 index 0000000000000..826947104b0da --- /dev/null +++ b/.changeset/fast-trams-grin.md @@ -0,0 +1,9 @@ +--- +"@gradio/client": minor +"@gradio/core": minor +"@gradio/image": minor +"gradio": minor +"gradio_client": minor +--- + +feat:Send Streaming data over Websocket if possible. Also support base64 output format for images. 
diff --git a/.changeset/few-clowns-notice.md b/.changeset/few-clowns-notice.md new file mode 100644 index 0000000000000..f00461a09705a --- /dev/null +++ b/.changeset/few-clowns-notice.md @@ -0,0 +1,6 @@ +--- +"@gradio/button": minor +"gradio": minor +--- + +feat:Adds a "huggingface" button variant, and makes it the default for `gr.LoginButton` and `gr.DuplicateButton` diff --git a/.changeset/five-teeth-relax.md b/.changeset/five-teeth-relax.md new file mode 100644 index 0000000000000..d428e3960d5cf --- /dev/null +++ b/.changeset/five-teeth-relax.md @@ -0,0 +1,5 @@ +--- +"@gradio/preview": patch +--- + +fix:Fix package.json `exports` of @gradio/preview diff --git a/.changeset/flat-chairs-fly.md b/.changeset/flat-chairs-fly.md new file mode 100644 index 0000000000000..a0c0fa155d402 --- /dev/null +++ b/.changeset/flat-chairs-fly.md @@ -0,0 +1,20 @@ +--- +"@gradio/atoms": minor +"@gradio/checkbox": minor +"@gradio/checkboxgroup": minor +"@gradio/colorpicker": minor +"@gradio/core": minor +"@gradio/datetime": minor +"@gradio/dropdown": minor +"@gradio/multimodaltextbox": minor +"@gradio/nativeplot": minor +"@gradio/number": minor +"@gradio/radio": minor +"@gradio/simpledropdown": minor +"@gradio/simpletextbox": minor +"@gradio/slider": minor +"@gradio/textbox": minor +"gradio": minor +--- + +feat:Allow `info=` to render markdown diff --git a/.changeset/floppy-keys-heal.md b/.changeset/floppy-keys-heal.md new file mode 100644 index 0000000000000..e8db79d627491 --- /dev/null +++ b/.changeset/floppy-keys-heal.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix typo in `tunneling.py` diff --git a/.changeset/floppy-nails-grab.md b/.changeset/floppy-nails-grab.md new file mode 100644 index 0000000000000..b59772a49e004 --- /dev/null +++ b/.changeset/floppy-nails-grab.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Object Detection From Webcam Stream Guide diff --git a/.changeset/floppy-pandas-appear.md b/.changeset/floppy-pandas-appear.md new file mode 100644 index 
0000000000000..7b9880429471c --- /dev/null +++ b/.changeset/floppy-pandas-appear.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Disable sagemaker_check() for now diff --git a/.changeset/funny-games-train.md b/.changeset/funny-games-train.md new file mode 100644 index 0000000000000..cb7b2fdb799ca --- /dev/null +++ b/.changeset/funny-games-train.md @@ -0,0 +1,7 @@ +--- +"@gradio/atoms": minor +"@gradio/chatbot": minor +"gradio": minor +--- + +feat:Fix chatinterface embedding height issues diff --git a/.changeset/gold-bats-return.md b/.changeset/gold-bats-return.md new file mode 100644 index 0000000000000..15e95f119465a --- /dev/null +++ b/.changeset/gold-bats-return.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Raise ChecksumMismatchError diff --git a/.changeset/gold-kiwis-share.md b/.changeset/gold-kiwis-share.md new file mode 100644 index 0000000000000..d81cfe1c24745 --- /dev/null +++ b/.changeset/gold-kiwis-share.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Chat Interface Multimodal Fix & Fallback to `gr.Examples()` diff --git a/.changeset/great-ghosts-find.md b/.changeset/great-ghosts-find.md new file mode 100644 index 0000000000000..3913f00314fa1 --- /dev/null +++ b/.changeset/great-ghosts-find.md @@ -0,0 +1,9 @@ +--- +"@gradio/preview": minor +"@self/app": minor +"@self/build": minor +"@self/spa": minor +"gradio": minor +--- + +feat:Fix custom component CLI on main/5.0 diff --git a/.changeset/green-pigs-wonder.md b/.changeset/green-pigs-wonder.md new file mode 100644 index 0000000000000..96d9dcf7dd062 --- /dev/null +++ b/.changeset/green-pigs-wonder.md @@ -0,0 +1,6 @@ +--- +"@gradio/audio": minor +"gradio": minor +--- + +feat:Fix Cached Examples for Streamed Media diff --git a/.changeset/green-wasps-invent.md b/.changeset/green-wasps-invent.md new file mode 100644 index 0000000000000..9618604b12313 --- /dev/null +++ b/.changeset/green-wasps-invent.md @@ -0,0 +1,6 @@ +--- +"@self/spa": minor +"gradio": minor +--- + +feat:add local fonts 
and update themes diff --git a/.changeset/heavy-goats-grab.md b/.changeset/heavy-goats-grab.md new file mode 100644 index 0000000000000..3a8bc2d745ad4 --- /dev/null +++ b/.changeset/heavy-goats-grab.md @@ -0,0 +1,10 @@ +--- +"@gradio/button": minor +"@gradio/column": minor +"@gradio/dataframe": minor +"@gradio/row": minor +"@gradio/tabs": minor +"gradio": minor +--- + +feat:UI theme fixes diff --git a/.changeset/heavy-memes-create.md b/.changeset/heavy-memes-create.md new file mode 100644 index 0000000000000..32d86729376c5 --- /dev/null +++ b/.changeset/heavy-memes-create.md @@ -0,0 +1,6 @@ +--- +"@gradio/dataframe": minor +"gradio": minor +--- + +feat:Tweak gr.Dataframe menu UX diff --git a/.changeset/honest-plums-peel.md b/.changeset/honest-plums-peel.md new file mode 100644 index 0000000000000..95a79e08017dd --- /dev/null +++ b/.changeset/honest-plums-peel.md @@ -0,0 +1,6 @@ +--- +"@gradio/chatbot": minor +"gradio": minor +--- + +feat:Chatbot bug fixes diff --git a/.changeset/huge-corners-tease.md b/.changeset/huge-corners-tease.md new file mode 100644 index 0000000000000..0a7a4fdacf0de --- /dev/null +++ b/.changeset/huge-corners-tease.md @@ -0,0 +1,8 @@ +--- +"@gradio/core": minor +"@gradio/lite": minor +"@self/spa": minor +"gradio": minor +--- + +feat:Remove lite/theme.css from the Git-managed file tree diff --git a/.changeset/hungry-dragons-drum.md b/.changeset/hungry-dragons-drum.md new file mode 100644 index 0000000000000..f7c5224f7afdc --- /dev/null +++ b/.changeset/hungry-dragons-drum.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Small tweaks to improve the DX for the "tuples"/"messages" argument in `gr.Chatbot` diff --git a/.changeset/hungry-tips-sin.md b/.changeset/hungry-tips-sin.md new file mode 100644 index 0000000000000..8136472702670 --- /dev/null +++ b/.changeset/hungry-tips-sin.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Adds `strict_cors` parameter to `launch()` diff --git a/.changeset/khaki-ducks-stare.md 
b/.changeset/khaki-ducks-stare.md new file mode 100644 index 0000000000000..ae516e5c30f4d --- /dev/null +++ b/.changeset/khaki-ducks-stare.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Dont move files to cache automatically in chatbot postprocess diff --git a/.changeset/khaki-ways-agree.md b/.changeset/khaki-ways-agree.md new file mode 100644 index 0000000000000..9302d7063a44a --- /dev/null +++ b/.changeset/khaki-ways-agree.md @@ -0,0 +1,6 @@ +--- +"@gradio/slider": minor +"gradio": minor +--- + +feat:Fix `slider-color` var diff --git a/.changeset/large-buttons-look.md b/.changeset/large-buttons-look.md new file mode 100644 index 0000000000000..44a86e8734e15 --- /dev/null +++ b/.changeset/large-buttons-look.md @@ -0,0 +1,7 @@ +--- +"@gradio/core": minor +"@gradio/dataframe": minor +"gradio": minor +--- + +feat:Update gr.Dataframe UI with action popover diff --git a/.changeset/large-impalas-take.md b/.changeset/large-impalas-take.md new file mode 100644 index 0000000000000..efa8e826f6ee9 --- /dev/null +++ b/.changeset/large-impalas-take.md @@ -0,0 +1,6 @@ +--- +"@gradio/chatbot": patch +"gradio": patch +--- + +fix:Ensure undo/try shows for final bot message in gr.Chatbot diff --git a/.changeset/lazy-clubs-help.md b/.changeset/lazy-clubs-help.md new file mode 100644 index 0000000000000..987753ba7e0d4 --- /dev/null +++ b/.changeset/lazy-clubs-help.md @@ -0,0 +1,9 @@ +--- +"@gradio/chatbot": minor +"@gradio/icons": minor +"@gradio/image": minor +"@gradio/video": minor +"gradio": minor +--- + +feat:Chatbot autoscroll diff --git a/.changeset/legal-masks-pay.md b/.changeset/legal-masks-pay.md new file mode 100644 index 0000000000000..c36f6c12ac39a --- /dev/null +++ b/.changeset/legal-masks-pay.md @@ -0,0 +1,8 @@ +--- +"@gradio/html": minor +"@gradio/markdown": minor +"@gradio/utils": minor +"gradio": minor +--- + +feat:Prevent HTML and Markdown height changing when status is hidden diff --git a/.changeset/light-bats-arrive.md b/.changeset/light-bats-arrive.md new 
file mode 100644 index 0000000000000..859277feba180 --- /dev/null +++ b/.changeset/light-bats-arrive.md @@ -0,0 +1,7 @@ +--- +"@gradio/core": minor +"@gradio/gallery": minor +"gradio": minor +--- + +feat:Video gallery diff --git a/.changeset/long-donkeys-hang.md b/.changeset/long-donkeys-hang.md new file mode 100644 index 0000000000000..cd3db25b6c7f3 --- /dev/null +++ b/.changeset/long-donkeys-hang.md @@ -0,0 +1,6 @@ +--- +"@gradio/markdown": minor +"gradio": minor +--- + +feat:Fix overflowing markdown in Chatbot diff --git a/.changeset/lovely-ties-live.md b/.changeset/lovely-ties-live.md new file mode 100644 index 0000000000000..0969acd9888dd --- /dev/null +++ b/.changeset/lovely-ties-live.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Add support for 3rd party providers to `gr.load`, and provide a better UX for conversational models diff --git a/.changeset/lucky-hotels-sort.md b/.changeset/lucky-hotels-sort.md new file mode 100644 index 0000000000000..dda4642942351 --- /dev/null +++ b/.changeset/lucky-hotels-sort.md @@ -0,0 +1,7 @@ +--- +"@gradio/column": minor +"gradio": minor +"website": minor +--- + +feat:Remove grey background behind all components diff --git a/.changeset/many-moons-like.md b/.changeset/many-moons-like.md new file mode 100644 index 0000000000000..575cdcc08f44b --- /dev/null +++ b/.changeset/many-moons-like.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix custom component CLI unit tests diff --git a/.changeset/mighty-goats-dance.md b/.changeset/mighty-goats-dance.md new file mode 100644 index 0000000000000..5837c7bb9349a --- /dev/null +++ b/.changeset/mighty-goats-dance.md @@ -0,0 +1,6 @@ +--- +"@gradio/code": minor +"gradio": minor +--- + +feat:Add Jinja2 language to Code component diff --git a/.changeset/modern-baboons-swim.md b/.changeset/modern-baboons-swim.md new file mode 100644 index 0000000000000..96ed211a18bf1 --- /dev/null +++ b/.changeset/modern-baboons-swim.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + 
+feat:Improve button consistency across light/dark mode diff --git a/.changeset/nasty-moments-mix.md b/.changeset/nasty-moments-mix.md new file mode 100644 index 0000000000000..b14952e57106d --- /dev/null +++ b/.changeset/nasty-moments-mix.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Make `gr.Image` preprocessing more efficient diff --git a/.changeset/nasty-zebras-cheat.md b/.changeset/nasty-zebras-cheat.md new file mode 100644 index 0000000000000..f55da95dbdea2 --- /dev/null +++ b/.changeset/nasty-zebras-cheat.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Set default `format` in `gr.Audio` to be `None` to avoid unnecessary preprocessing diff --git a/.changeset/neat-bushes-sit.md b/.changeset/neat-bushes-sit.md new file mode 100644 index 0000000000000..a216864891d1c --- /dev/null +++ b/.changeset/neat-bushes-sit.md @@ -0,0 +1,7 @@ +--- +"@gradio/chatbot": minor +"@gradio/markdown": minor +"gradio": minor +--- + +feat:Fix markdown code copy/check button in gr.Chatbot diff --git a/.changeset/nice-badgers-admire.md b/.changeset/nice-badgers-admire.md new file mode 100644 index 0000000000000..a5cd4965e9244 --- /dev/null +++ b/.changeset/nice-badgers-admire.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Fixes website build in 5.0-dev diff --git a/.changeset/nice-donuts-invite.md b/.changeset/nice-donuts-invite.md new file mode 100644 index 0000000000000..244789855d2cb --- /dev/null +++ b/.changeset/nice-donuts-invite.md @@ -0,0 +1,6 @@ +--- +"@gradio/imageeditor": minor +"gradio": minor +--- + +feat:Fix cut off in gr.ImageEditor diff --git a/.changeset/nice-rats-flash.md b/.changeset/nice-rats-flash.md new file mode 100644 index 0000000000000..b1dd0c2bdd25d --- /dev/null +++ b/.changeset/nice-rats-flash.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Small fixes to `gr.Dataframe` and chatbot docs diff --git a/.changeset/nine-hotels-juggle.md b/.changeset/nine-hotels-juggle.md new file mode 100644 index 0000000000000..d0ca3d059de35 --- 
/dev/null +++ b/.changeset/nine-hotels-juggle.md @@ -0,0 +1,6 @@ +--- +"@gradio/chatbot": minor +"gradio": minor +--- + +feat:9227 chatinterface retry bug diff --git a/.changeset/old-items-sink.md b/.changeset/old-items-sink.md new file mode 100644 index 0000000000000..5b1d82ba61494 --- /dev/null +++ b/.changeset/old-items-sink.md @@ -0,0 +1,5 @@ +--- +"@self/app": minor +--- + +feat:fix css diff --git a/.changeset/open-chefs-say.md b/.changeset/open-chefs-say.md new file mode 100644 index 0000000000000..85cb7ccb0c9b9 --- /dev/null +++ b/.changeset/open-chefs-say.md @@ -0,0 +1,6 @@ +--- +"@gradio/imageeditor": minor +"gradio": minor +--- + +feat:Fix `gr.ImageEditor` toolbar cutoff diff --git a/.changeset/petite-months-fold.md b/.changeset/petite-months-fold.md new file mode 100644 index 0000000000000..b676e155e4bbb --- /dev/null +++ b/.changeset/petite-months-fold.md @@ -0,0 +1,8 @@ +--- +"@gradio/core": minor +"@self/app": minor +"@self/spa": minor +"gradio": minor +--- + +feat:Fix reload mode diff --git a/.changeset/pink-shirts-fry.md b/.changeset/pink-shirts-fry.md new file mode 100644 index 0000000000000..9a2fa2217b0d0 --- /dev/null +++ b/.changeset/pink-shirts-fry.md @@ -0,0 +1,10 @@ +--- +"@gradio/lite": minor +"@gradio/theme": minor +"@self/component-test": minor +"@self/storybook": minor +"gradio": minor +"website": minor +--- + +feat:🔡 Update default core Gradio font diff --git a/.changeset/plenty-dragons-fold.md b/.changeset/plenty-dragons-fold.md new file mode 100644 index 0000000000000..aa7d7d464bd35 --- /dev/null +++ b/.changeset/plenty-dragons-fold.md @@ -0,0 +1,6 @@ +--- +"@gradio/wasm": minor +"gradio": minor +--- + +feat:Pre/post-processing download requests diff --git a/.changeset/polite-bugs-vanish.md b/.changeset/polite-bugs-vanish.md new file mode 100644 index 0000000000000..6fac00fa43ec4 --- /dev/null +++ b/.changeset/polite-bugs-vanish.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Lighten secondary button grey fill diff --git 
a/.changeset/pre.json b/.changeset/pre.json new file mode 100644 index 0000000000000..aee7bfd8a7128 --- /dev/null +++ b/.changeset/pre.json @@ -0,0 +1,229 @@ +{ + "mode": "pre", + "tag": "beta", + "initialVersions": { + "@gradio/client": "1.3.0", + "gradio_client": "1.1.0", + "gradio": "4.38.1", + "@gradio/cdn-test": "0.0.1", + "@gradio/spaces-test": "0.0.1", + "website": "0.34.0", + "@gradio/accordion": "0.3.18", + "@gradio/annotatedimage": "0.6.13", + "@gradio/app": "1.38.1", + "@gradio/atoms": "0.7.6", + "@gradio/audio": "0.12.2", + "@gradio/box": "0.1.20", + "@gradio/button": "0.2.46", + "@gradio/chatbot": "0.12.1", + "@gradio/checkbox": "0.3.8", + "@gradio/checkboxgroup": "0.5.8", + "@gradio/code": "0.7.0", + "@gradio/colorpicker": "0.3.8", + "@gradio/column": "0.1.2", + "@gradio/dataframe": "0.8.13", + "@gradio/dataset": "0.2.0", + "@gradio/datetime": "0.0.2", + "@gradio/downloadbutton": "0.1.23", + "@gradio/dropdown": "0.7.8", + "@gradio/fallback": "0.3.8", + "@gradio/file": "0.8.5", + "@gradio/fileexplorer": "0.4.14", + "@gradio/form": "0.1.20", + "@gradio/gallery": "0.11.2", + "@gradio/group": "0.1.1", + "@gradio/highlightedtext": "0.7.2", + "@gradio/html": "0.3.1", + "@gradio/icons": "0.6.0", + "@gradio/image": "0.12.2", + "@gradio/imageeditor": "0.7.13", + "@gradio/json": "0.2.8", + "@gradio/label": "0.3.8", + "@gradio/lite": "4.38.1", + "@gradio/markdown": "0.8.1", + "@gradio/model3d": "0.11.0", + "@gradio/multimodaltextbox": "0.5.2", + "@gradio/number": "0.4.8", + "@gradio/paramviewer": "0.4.17", + "@gradio/plot": "0.6.0", + "@gradio/preview": "0.10.1", + "gradio_test": "0.5.0", + "@gradio/radio": "0.5.8", + "@gradio/row": "0.1.3", + "@gradio/simpledropdown": "0.2.8", + "@gradio/simpleimage": "0.6.2", + "@gradio/simpletextbox": "0.2.8", + "@gradio/slider": "0.4.8", + "@gradio/state": "0.1.0", + "@gradio/statustracker": "0.7.1", + "@gradio/storybook": "0.6.0", + "@gradio/tabitem": "0.2.12", + "@gradio/tabs": "0.2.11", + "@gradio/textbox": "0.6.7", + 
"@gradio/theme": "0.2.3", + "@gradio/timer": "0.3.0", + "@gradio/tooltip": "0.1.0", + "@gradio/tootils": "0.6.0", + "@gradio/upload": "0.11.5", + "@gradio/uploadbutton": "0.6.14", + "@gradio/utils": "0.5.1", + "@gradio/video": "0.9.2", + "@gradio/wasm": "0.11.0", + "@self/cdn-test": "0.0.1", + "@self/spaces-test": "0.0.1", + "@self/app": "1.40.0", + "@self/build": "0.0.2", + "@self/component-test": "0.1.0", + "@gradio/core": "0.0.2", + "@gradio/nativeplot": "0.3.1", + "@self/spa": "0.0.2", + "@self/storybook": "0.6.1", + "@self/tootils": "0.6.4" + }, + "changesets": [ + "beige-houses-shine", + "blue-zoos-marry", + "breezy-olives-wonder", + "bright-apes-fly", + "bright-garlics-melt", + "brown-queens-begin", + "busy-jokes-sit", + "busy-lizards-heal", + "busy-tools-chew", + "calm-jobs-hope", + "calm-monkeys-argue", + "calm-planets-send", + "chatty-houses-do", + "chilly-dragons-smoke", + "chilly-jars-sip", + "chilly-places-sniff", + "chilly-socks-poke", + "chubby-hairs-beam", + "clean-pigs-arrive", + "cold-lies-mate", + "cruel-heads-hunt", + "cuddly-queens-melt", + "cute-rules-write", + "cyan-spies-check", + "dark-cougars-fold", + "dark-moose-shine", + "deep-bananas-switch", + "deep-memes-cheat", + "deep-ways-wink", + "dirty-lions-follow", + "dirty-pugs-hunt", + "dry-frogs-argue", + "dull-plants-trade", + "easy-files-serve", + "easy-snakes-arrive", + "every-breads-float", + "every-candies-sell", + "every-geese-shave", + "evil-clocks-visit", + "fancy-pianos-dig", + "fast-trams-grin", + "few-clowns-notice", + "five-teeth-relax", + "flat-chairs-fly", + "floppy-keys-heal", + "floppy-nails-grab", + "floppy-pandas-appear", + "funny-games-train", + "gold-bats-return", + "gold-kiwis-share", + "great-ghosts-find", + "green-pigs-wonder", + "green-wasps-invent", + "heavy-goats-grab", + "heavy-memes-create", + "honest-plums-peel", + "huge-corners-tease", + "hungry-dragons-drum", + "hungry-tips-sin", + "khaki-ducks-stare", + "khaki-ways-agree", + "large-buttons-look", + 
"large-impalas-take", + "lazy-clubs-help", + "legal-masks-pay", + "light-bats-arrive", + "long-donkeys-hang", + "lovely-ties-live", + "lucky-hotels-sort", + "many-moons-like", + "mighty-goats-dance", + "modern-baboons-swim", + "nasty-moments-mix", + "nasty-zebras-cheat", + "neat-bushes-sit", + "nice-badgers-admire", + "nice-donuts-invite", + "nice-rats-flash", + "nine-hotels-juggle", + "old-items-sink", + "open-chefs-say", + "petite-months-fold", + "pink-shirts-fry", + "plenty-dragons-fold", + "polite-bugs-vanish", + "pretty-hairs-rest", + "proud-memes-fold", + "public-baboons-dig", + "public-carpets-behave", + "puny-bats-smell", + "quiet-gifts-cheer", + "red-brooms-bow", + "rich-crews-suffer", + "rotten-bears-bathe", + "rotten-dingos-cross", + "sad-chicken-sleep", + "sad-steaks-shout", + "salty-vans-behave", + "seven-deer-occur", + "sharp-bikes-sleep", + "shy-foxes-flow", + "silent-moose-push", + "silly-chefs-marry", + "six-bobcats-cry", + "six-melons-serve", + "slow-nails-fetch", + "smooth-fans-hide", + "smooth-places-walk", + "social-lizards-tickle", + "solid-chicken-love", + "some-clocks-think", + "sour-ties-repair", + "strong-stars-count", + "stupid-eggs-sing", + "stupid-memes-turn", + "stupid-tires-stare", + "sweet-papers-fail", + "tall-moose-yawn", + "tame-zoos-care", + "tasty-tigers-mate", + "thick-geese-divide", + "thick-wasps-love", + "thin-boxes-matter", + "thirty-cloths-taste", + "thirty-insects-unite", + "three-toys-knock", + "tidy-bobcats-marry", + "tired-moons-tell", + "tough-rooms-flash", + "true-pigs-build", + "two-geckos-accept", + "warm-lemons-mate", + "weak-dryers-show", + "weak-glasses-enter", + "wet-memes-smash", + "wicked-snails-drum", + "wicked-swans-wink", + "wide-dodos-peel", + "witty-rice-fix", + "witty-worlds-grin", + "young-candles-stare", + "young-ears-vanish", + "young-memes-shake", + "yummy-weeks-learn" + ] +} diff --git a/.changeset/pretty-hairs-rest.md b/.changeset/pretty-hairs-rest.md new file mode 100644 index 
0000000000000..45d7e62c28f57 --- /dev/null +++ b/.changeset/pretty-hairs-rest.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fixes race condition in `update_root_in_config` diff --git a/.changeset/proud-memes-fold.md b/.changeset/proud-memes-fold.md new file mode 100644 index 0000000000000..cd6724501219d --- /dev/null +++ b/.changeset/proud-memes-fold.md @@ -0,0 +1,6 @@ +--- +"@gradio/wasm": minor +"gradio": minor +--- + +feat:Proposal: remove `gr.make_waveform` and remove `matplotlib` as a dependency diff --git a/.changeset/public-baboons-dig.md b/.changeset/public-baboons-dig.md new file mode 100644 index 0000000000000..7d0a652a33366 --- /dev/null +++ b/.changeset/public-baboons-dig.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Tweaks to SSR mode diff --git a/.changeset/public-carpets-behave.md b/.changeset/public-carpets-behave.md new file mode 100644 index 0000000000000..40d3610e6b50a --- /dev/null +++ b/.changeset/public-carpets-behave.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Api info fix diff --git a/.changeset/puny-bats-smell.md b/.changeset/puny-bats-smell.md new file mode 100644 index 0000000000000..9674004f0873b --- /dev/null +++ b/.changeset/puny-bats-smell.md @@ -0,0 +1,5 @@ +--- +"gradio": patch +--- + +feat:Raise WasmUnsupportedError for ffmpeg usage on Lite diff --git a/.changeset/quiet-gifts-cheer.md b/.changeset/quiet-gifts-cheer.md new file mode 100644 index 0000000000000..92fa394d58d98 --- /dev/null +++ b/.changeset/quiet-gifts-cheer.md @@ -0,0 +1,7 @@ +--- +"@gradio/chatbot": minor +"@gradio/multimodaltextbox": minor +"gradio": minor +--- + +feat:Some more chatbot fixes diff --git a/.changeset/red-brooms-bow.md b/.changeset/red-brooms-bow.md new file mode 100644 index 0000000000000..89a7ffa1fdeff --- /dev/null +++ b/.changeset/red-brooms-bow.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Use or `pathlib.Path` objects to indicate filepaths for `css`, `js`, and `head` parameters diff --git 
a/.changeset/rich-crews-suffer.md b/.changeset/rich-crews-suffer.md new file mode 100644 index 0000000000000..89ae6fb2a6292 --- /dev/null +++ b/.changeset/rich-crews-suffer.md @@ -0,0 +1,7 @@ +--- +"@gradio/dataframe": minor +"gradio": minor +"website": minor +--- + +feat:Fix. Triggered dataframe change event for header change diff --git a/.changeset/rotten-bears-bathe.md b/.changeset/rotten-bears-bathe.md new file mode 100644 index 0000000000000..248148d7a457f --- /dev/null +++ b/.changeset/rotten-bears-bathe.md @@ -0,0 +1,5 @@ +--- +"gradio": major +--- + +feat:Deprecate for 5.0 diff --git a/.changeset/rotten-dingos-cross.md b/.changeset/rotten-dingos-cross.md new file mode 100644 index 0000000000000..111c0d26f4f7a --- /dev/null +++ b/.changeset/rotten-dingos-cross.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Reduce analytics that are collected diff --git a/.changeset/sad-chicken-sleep.md b/.changeset/sad-chicken-sleep.md new file mode 100644 index 0000000000000..afdf4049fd32d --- /dev/null +++ b/.changeset/sad-chicken-sleep.md @@ -0,0 +1,6 @@ +--- +"@gradio/image": patch +"gradio": patch +--- + +feat:Fix stop recording button colors diff --git a/.changeset/sad-steaks-shout.md b/.changeset/sad-steaks-shout.md new file mode 100644 index 0000000000000..e183cc18831e7 --- /dev/null +++ b/.changeset/sad-steaks-shout.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Fixes annoying height bug in playground diff --git a/.changeset/salty-vans-behave.md b/.changeset/salty-vans-behave.md new file mode 100644 index 0000000000000..2c12d93f800b0 --- /dev/null +++ b/.changeset/salty-vans-behave.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Deprecate passing a tuple for gr.Code value diff --git a/.changeset/seven-deer-occur.md b/.changeset/seven-deer-occur.md new file mode 100644 index 0000000000000..377f5d8098d59 --- /dev/null +++ b/.changeset/seven-deer-occur.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Raise error instead of warning if checksums 
for binary do not match diff --git a/.changeset/sharp-bikes-sleep.md b/.changeset/sharp-bikes-sleep.md new file mode 100644 index 0000000000000..b8cae17cd3820 --- /dev/null +++ b/.changeset/sharp-bikes-sleep.md @@ -0,0 +1,6 @@ +--- +"@gradio/lite": patch +"gradio": patch +--- + +feat:Stop using `multiprocessing` in `flagging.CSVLogger` on Lite v5 diff --git a/.changeset/shy-foxes-flow.md b/.changeset/shy-foxes-flow.md new file mode 100644 index 0000000000000..bc2103fa1e787 --- /dev/null +++ b/.changeset/shy-foxes-flow.md @@ -0,0 +1,9 @@ +--- +"@gradio/audio": minor +"@gradio/checkbox": minor +"@gradio/checkboxgroup": minor +"@gradio/radio": minor +"gradio": minor +--- + +feat:Decrease component radii and remove input shadows diff --git a/.changeset/silent-moose-push.md b/.changeset/silent-moose-push.md new file mode 100644 index 0000000000000..0556b7ea4f982 --- /dev/null +++ b/.changeset/silent-moose-push.md @@ -0,0 +1,6 @@ +--- +"@gradio/chatbot": minor +"gradio": minor +--- + +feat:Fixes: Chatbot examples for custom chatbot + rename `suggestions` -> `examples` diff --git a/.changeset/silly-chefs-marry.md b/.changeset/silly-chefs-marry.md new file mode 100644 index 0000000000000..dcfe465686011 --- /dev/null +++ b/.changeset/silly-chefs-marry.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Refactoring playground diff --git a/.changeset/six-bobcats-cry.md b/.changeset/six-bobcats-cry.md new file mode 100644 index 0000000000000..908d89302a80a --- /dev/null +++ b/.changeset/six-bobcats-cry.md @@ -0,0 +1,12 @@ +--- +"@gradio/audio": minor +"@gradio/client": minor +"@gradio/core": minor +"@gradio/image": minor +"@gradio/statustracker": minor +"@gradio/video": minor +"gradio": minor +"gradio_client": minor +--- + +feat:Streaming inputs for 5.0 diff --git a/.changeset/six-melons-serve.md b/.changeset/six-melons-serve.md new file mode 100644 index 0000000000000..8ec78a87ca838 --- /dev/null +++ b/.changeset/six-melons-serve.md @@ -0,0 +1,5 @@ +--- +"gradio": minor 
+--- + +feat:Deep equal check with hash diff --git a/.changeset/slow-nails-fetch.md b/.changeset/slow-nails-fetch.md new file mode 100644 index 0000000000000..a30b29a6092af --- /dev/null +++ b/.changeset/slow-nails-fetch.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Change caching to occur not at the creation of a `gr.Examples()` but when the Blocks is actually launched diff --git a/.changeset/smooth-fans-hide.md b/.changeset/smooth-fans-hide.md new file mode 100644 index 0000000000000..6af0a9c12d53f --- /dev/null +++ b/.changeset/smooth-fans-hide.md @@ -0,0 +1,7 @@ +--- +"@gradio/multimodaltextbox": minor +"@gradio/textbox": minor +"gradio": minor +--- + +feat:Disable the submit button and enter-key submit when the text is empty diff --git a/.changeset/smooth-places-walk.md b/.changeset/smooth-places-walk.md new file mode 100644 index 0000000000000..f6066cfa90a7e --- /dev/null +++ b/.changeset/smooth-places-walk.md @@ -0,0 +1,6 @@ +--- +"@gradio/dropdown": minor +"gradio": minor +--- + +feat:Fix single select dropdown diff --git a/.changeset/social-lizards-tickle.md b/.changeset/social-lizards-tickle.md new file mode 100644 index 0000000000000..db581ce7a6d4f --- /dev/null +++ b/.changeset/social-lizards-tickle.md @@ -0,0 +1,6 @@ +--- +"@gradio/model3d": patch +"gradio": patch +--- + +feat:Update babylon.js to `v7` for `gr.Model3D` diff --git a/.changeset/solid-chicken-love.md b/.changeset/solid-chicken-love.md new file mode 100644 index 0000000000000..7e04a9156de55 --- /dev/null +++ b/.changeset/solid-chicken-love.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:DNS resolver on ip check diff --git a/.changeset/some-clocks-think.md b/.changeset/some-clocks-think.md new file mode 100644 index 0000000000000..6d5a917d2ee32 --- /dev/null +++ b/.changeset/some-clocks-think.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Fix gradio.js aws path diff --git a/.changeset/sour-ties-repair.md b/.changeset/sour-ties-repair.md new file mode 100644 index 
0000000000000..79dd6c8459ff7 --- /dev/null +++ b/.changeset/sour-ties-repair.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Small changes to caching diff --git a/.changeset/strong-stars-count.md b/.changeset/strong-stars-count.md new file mode 100644 index 0000000000000..77d8a82fea547 --- /dev/null +++ b/.changeset/strong-stars-count.md @@ -0,0 +1,12 @@ +--- +"@gradio/atoms": minor +"@gradio/chatbot": minor +"@gradio/dataframe": minor +"@gradio/fileexplorer": minor +"@gradio/json": minor +"@gradio/markdown": minor +"@gradio/row": minor +"gradio": minor +--- + +feat:Standardize `height` across components and add `max_height` and `min_height` parameters where appropriate diff --git a/.changeset/stupid-eggs-sing.md b/.changeset/stupid-eggs-sing.md new file mode 100644 index 0000000000000..c28be48fd8d6a --- /dev/null +++ b/.changeset/stupid-eggs-sing.md @@ -0,0 +1,9 @@ +--- +"@gradio/audio": minor +"@gradio/client": minor +"@gradio/core": minor +"@self/app": minor +"gradio": minor +--- + +feat:fix SSR apps on spaces diff --git a/.changeset/stupid-memes-turn.md b/.changeset/stupid-memes-turn.md new file mode 100644 index 0000000000000..454d81d7a5ddb --- /dev/null +++ b/.changeset/stupid-memes-turn.md @@ -0,0 +1,6 @@ +--- +"@gradio/nativeplot": minor +"gradio": minor +--- + +feat:Hide x axis labels diff --git a/.changeset/stupid-tires-stare.md b/.changeset/stupid-tires-stare.md new file mode 100644 index 0000000000000..e57d563bd8425 --- /dev/null +++ b/.changeset/stupid-tires-stare.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix SSR mode flag with `mount_gradio_app` and revert changes to pytests diff --git a/.changeset/sweet-papers-fail.md b/.changeset/sweet-papers-fail.md new file mode 100644 index 0000000000000..e0c2115edeab8 --- /dev/null +++ b/.changeset/sweet-papers-fail.md @@ -0,0 +1,9 @@ +--- +"@gradio/core": minor +"@gradio/nativeplot": minor +"@gradio/plot": minor +"@self/spa": minor +"gradio": minor +--- + +feat:Fix plots diff --git 
a/.changeset/tall-moose-yawn.md b/.changeset/tall-moose-yawn.md new file mode 100644 index 0000000000000..9a55181cc2cc4 --- /dev/null +++ b/.changeset/tall-moose-yawn.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Further tweak to is_in_or_equal diff --git a/.changeset/tame-zoos-care.md b/.changeset/tame-zoos-care.md new file mode 100644 index 0000000000000..99a61cd484fe6 --- /dev/null +++ b/.changeset/tame-zoos-care.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Update object detection guide diff --git a/.changeset/tasty-tigers-mate.md b/.changeset/tasty-tigers-mate.md new file mode 100644 index 0000000000000..516e37721358e --- /dev/null +++ b/.changeset/tasty-tigers-mate.md @@ -0,0 +1,6 @@ +--- +"@gradio/chatbot": minor +"gradio": minor +--- + +feat:Fix `gr.Chatbot` panels layout diff --git a/.changeset/thick-geese-divide.md b/.changeset/thick-geese-divide.md new file mode 100644 index 0000000000000..f0c7ea1f2c1da --- /dev/null +++ b/.changeset/thick-geese-divide.md @@ -0,0 +1,9 @@ +--- +"@gradio/audio": minor +"@gradio/box": minor +"@self/build": minor +"gradio": minor +"website": minor +--- + +feat:5.0 merge take 2 diff --git a/.changeset/thick-wasps-love.md b/.changeset/thick-wasps-love.md new file mode 100644 index 0000000000000..c080d4ba6f7ce --- /dev/null +++ b/.changeset/thick-wasps-love.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Allow skipping an arbitrary number of output components, and also raise a warning if the number of output components does not match the number of values returned from a function diff --git a/.changeset/thin-boxes-matter.md b/.changeset/thin-boxes-matter.md new file mode 100644 index 0000000000000..fbd518932a621 --- /dev/null +++ b/.changeset/thin-boxes-matter.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix node process to run with correct server name diff --git a/.changeset/thirty-cloths-taste.md b/.changeset/thirty-cloths-taste.md new file mode 100644 index 0000000000000..0a6ac20c1be7b --- 
/dev/null +++ b/.changeset/thirty-cloths-taste.md @@ -0,0 +1,6 @@ +--- +"gradio": patch +"gradio_client": patch +--- + +fix:Check for `file_types` parameter in the backend diff --git a/.changeset/thirty-insects-unite.md b/.changeset/thirty-insects-unite.md new file mode 100644 index 0000000000000..a8d038a91c6d2 --- /dev/null +++ b/.changeset/thirty-insects-unite.md @@ -0,0 +1,6 @@ +--- +"@self/app": minor +"gradio": minor +--- + +feat:Fix favicon in ssr mode diff --git a/.changeset/three-toys-knock.md b/.changeset/three-toys-knock.md new file mode 100644 index 0000000000000..55fd74439bf2b --- /dev/null +++ b/.changeset/three-toys-knock.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Lite: HTTPX client improvement diff --git a/.changeset/tidy-bobcats-marry.md b/.changeset/tidy-bobcats-marry.md new file mode 100644 index 0000000000000..c85e9466b00bb --- /dev/null +++ b/.changeset/tidy-bobcats-marry.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Improve UI on the Playground diff --git a/.changeset/tired-moons-tell.md b/.changeset/tired-moons-tell.md new file mode 100644 index 0000000000000..ff26a10af7700 --- /dev/null +++ b/.changeset/tired-moons-tell.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:File access security guide diff --git a/.changeset/tough-rooms-flash.md b/.changeset/tough-rooms-flash.md new file mode 100644 index 0000000000000..adcc29dd50ffc --- /dev/null +++ b/.changeset/tough-rooms-flash.md @@ -0,0 +1,6 @@ +--- +"@gradio/multimodaltextbox": patch +"gradio": patch +--- + +fix:Fix File Types for MultimodalTextbox diff --git a/.changeset/true-pigs-build.md b/.changeset/true-pigs-build.md new file mode 100644 index 0000000000000..48b0a65c6a4e9 --- /dev/null +++ b/.changeset/true-pigs-build.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Change dark mode color theme from `gray` to `zinc` diff --git a/.changeset/two-geckos-accept.md b/.changeset/two-geckos-accept.md new file mode 100644 index 0000000000000..ad41b3f479729 --- 
/dev/null +++ b/.changeset/two-geckos-accept.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Minor fixes to docs and a demo diff --git a/.changeset/warm-lemons-mate.md b/.changeset/warm-lemons-mate.md new file mode 100644 index 0000000000000..a54ac7e12e1d4 --- /dev/null +++ b/.changeset/warm-lemons-mate.md @@ -0,0 +1,8 @@ +--- +"@gradio/icons": minor +"@gradio/multimodaltextbox": minor +"@gradio/textbox": minor +"gradio": minor +--- + +feat:Match style of textbox stop button to submit button diff --git a/.changeset/weak-dryers-show.md b/.changeset/weak-dryers-show.md new file mode 100644 index 0000000000000..b973bd23536e8 --- /dev/null +++ b/.changeset/weak-dryers-show.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Add `css_paths` and `head_paths` parameters diff --git a/.changeset/weak-glasses-enter.md b/.changeset/weak-glasses-enter.md new file mode 100644 index 0000000000000..0e76c994d10d8 --- /dev/null +++ b/.changeset/weak-glasses-enter.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:replace ip addresses with machine-specific hashes diff --git a/.changeset/wet-memes-smash.md b/.changeset/wet-memes-smash.md new file mode 100644 index 0000000000000..be30e13fe3e4a --- /dev/null +++ b/.changeset/wet-memes-smash.md @@ -0,0 +1,6 @@ +--- +"@gradio/textbox": patch +"gradio": patch +--- + +fix:Set the color of placeholder in a disabled textbox to gray instead of black, and disable typing while a response is generating in `gr.ChatInterface`, allow `gr.MultimodalTextbox` to accept string values diff --git a/.changeset/wicked-snails-drum.md b/.changeset/wicked-snails-drum.md new file mode 100644 index 0000000000000..5837379d271c9 --- /dev/null +++ b/.changeset/wicked-snails-drum.md @@ -0,0 +1,7 @@ +--- +"@gradio/dropdown": minor +"@gradio/radio": minor +"gradio": minor +--- + +feat:Fix change triggers for dropdown and radio diff --git a/.changeset/wicked-swans-wink.md b/.changeset/wicked-swans-wink.md new file mode 100644 index 
0000000000000..3152ab867c2ec --- /dev/null +++ b/.changeset/wicked-swans-wink.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Add info about Powershell client diff --git a/.changeset/wide-dodos-peel.md b/.changeset/wide-dodos-peel.md new file mode 100644 index 0000000000000..acbdb30e58362 --- /dev/null +++ b/.changeset/wide-dodos-peel.md @@ -0,0 +1,30 @@ +--- +"@gradio/audio": minor +"@gradio/chatbot": minor +"@gradio/client": minor +"@gradio/core": minor +"@gradio/dataframe": minor +"@gradio/file": minor +"@gradio/gallery": minor +"@gradio/image": minor +"@gradio/imageeditor": minor +"@gradio/lite": minor +"@gradio/markdown": minor +"@gradio/model3d": minor +"@gradio/multimodaltextbox": minor +"@gradio/nativeplot": minor +"@gradio/simpleimage": minor +"@gradio/statustracker": minor +"@gradio/tabs": minor +"@gradio/upload": minor +"@gradio/uploadbutton": minor +"@gradio/utils": minor +"@gradio/video": minor +"@self/app": minor +"@self/build": minor +"@self/spa": minor +"gradio": minor +"gradio_client": minor +--- + +feat:Ssr part 2 diff --git a/.changeset/witty-rice-fix.md b/.changeset/witty-rice-fix.md new file mode 100644 index 0000000000000..884a1ac5bfc33 --- /dev/null +++ b/.changeset/witty-rice-fix.md @@ -0,0 +1,8 @@ +--- +"@gradio/client": minor +"@gradio/core": minor +"gradio": minor +"gradio_client": minor +--- + +feat:prefix api routes diff --git a/.changeset/witty-worlds-grin.md b/.changeset/witty-worlds-grin.md new file mode 100644 index 0000000000000..c539d062540d1 --- /dev/null +++ b/.changeset/witty-worlds-grin.md @@ -0,0 +1,5 @@ +--- +"website": minor +--- + +feat:Adds LLM to the Playground diff --git a/.changeset/young-candles-stare.md b/.changeset/young-candles-stare.md new file mode 100644 index 0000000000000..1d290441f42ed --- /dev/null +++ b/.changeset/young-candles-stare.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Do not attach `content_disposition_type = "attachment"` headers for files explicitly allowed by developer diff 
--git a/.changeset/young-ears-vanish.md b/.changeset/young-ears-vanish.md new file mode 100644 index 0000000000000..1988d752423ee --- /dev/null +++ b/.changeset/young-ears-vanish.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Refactor lazy caching diff --git a/.changeset/young-memes-shake.md b/.changeset/young-memes-shake.md new file mode 100644 index 0000000000000..0efd5807db1c5 --- /dev/null +++ b/.changeset/young-memes-shake.md @@ -0,0 +1,5 @@ +--- +"@self/app": minor +--- + +feat:Ensure media queries work for SSR mode diff --git a/.changeset/yummy-weeks-learn.md b/.changeset/yummy-weeks-learn.md new file mode 100644 index 0000000000000..edf5dcc1ba26e --- /dev/null +++ b/.changeset/yummy-weeks-learn.md @@ -0,0 +1,7 @@ +--- +"@gradio/code": minor +"@self/component-test": minor +"gradio": minor +--- + +feat:Added max lines and overflow scrollbar for `gr.Code` diff --git a/.config/.prettierignore b/.config/.prettierignore index f45be57177d56..b5411e40f32bb 100644 --- a/.config/.prettierignore +++ b/.config/.prettierignore @@ -21,7 +21,7 @@ ../js/lite/src/theme.css ../js/storybook/theme.css **/gradio_cached_examples/** -**/storybook-static/** +**/storybook-static/** **/.vscode/** sweep.yaml **/.vercel/** @@ -32,4 +32,4 @@ sweep.yaml **/venv/** ../js/app/src/api_docs/CodeSnippet.svelte ../js/app/src/api_docs/RecordingSnippet.svelte -.changeset/pre.json \ No newline at end of file +../.changeset/pre.json diff --git a/.config/copy_frontend.py b/.config/copy_frontend.py index 3b0e0793b2ac1..a62ac6711de8d 100644 --- a/.config/copy_frontend.py +++ b/.config/copy_frontend.py @@ -15,6 +15,9 @@ def copy_js_code(root: str | pathlib.Path): "playwright-report", "workbench", "tooltils", + "component-test", + "core", + "spa", ] for entry in (pathlib.Path(root) / "js").iterdir(): if ( @@ -46,7 +49,7 @@ def ignore(s, names): shutil.copytree( str(pathlib.Path(root) / "client" / "js"), str(pathlib.Path("gradio") / "_frontend_code" / "client"), - ignore=lambda d, names: 
["node_modules"], + ignore=lambda d, names: ["node_modules", "test"], dirs_exist_ok=True, ) diff --git a/.config/eslint.config.js b/.config/eslint.config.js index b1fda3c0a5fd3..294bb9293e438 100644 --- a/.config/eslint.config.js +++ b/.config/eslint.config.js @@ -73,6 +73,7 @@ export default [ "js/spa/test/**/*", "**/*vite.config.ts", "**/_website/**/*", + "**/app/**/*", "**/_spaces-test/**/*", "**/preview/test/**/*", "**/component-test/**/*", diff --git a/.config/playwright.config.js b/.config/playwright.config.js index 3d17b35e0cfa3..6b5f971c16b67 100644 --- a/.config/playwright.config.js +++ b/.config/playwright.config.js @@ -14,8 +14,8 @@ const base = defineConfig({ ] } }, - expect: { timeout: 15000 }, - timeout: 30000, + expect: { timeout: 10000 }, + timeout: 10000, testMatch: /.*\.spec\.ts/, testDir: "..", workers: process.env.CI ? 1 : undefined, @@ -51,7 +51,8 @@ const lite = defineConfig(base, { "**/file_component_events.spec.ts", "**/kitchen_sink.spec.ts", "**/gallery_component_events.spec.ts", - "**/image_remote_url.spec.ts" // To detect the bugs on Lite fixed in https://github.com/gradio-app/gradio/pull/8011 and https://github.com/gradio-app/gradio/pull/8026 + "**/image_remote_url.spec.ts", // To detect the bugs on Lite fixed in https://github.com/gradio-app/gradio/pull/8011 and https://github.com/gradio-app/gradio/pull/8026 + "**/outbreak_forecast.spec.ts" // To test matplotlib on Lite ], workers: 1, retries: 3, diff --git a/.dockerignore b/.dockerignore index 916fc50c4d5d5..17e4ab1213839 100644 --- a/.dockerignore +++ b/.dockerignore @@ -21,6 +21,7 @@ gradio/templates/frontend/cdn *.db *.sqlite3 gradio/launches.json +gradio/hash_seed.txt # Tests .coverage diff --git a/.github/actions/install-all-deps/action.yml b/.github/actions/install-all-deps/action.yml index 9b60e434207c4..0381d9908eb07 100644 --- a/.github/actions/install-all-deps/action.yml +++ b/.github/actions/install-all-deps/action.yml @@ -13,7 +13,7 @@ inputs: default: "false" 
python_version: description: "Python version" - default: "3.8" + default: "3.10" os: description: "OS" default: "ubuntu-latest" diff --git a/.github/workflows/delete-stale-spaces.yml b/.github/workflows/delete-stale-spaces.yml index e1e02a227ddc0..0179405029434 100644 --- a/.github/workflows/delete-stale-spaces.yml +++ b/.github/workflows/delete-stale-spaces.yml @@ -25,7 +25,7 @@ jobs: - name: Install Python uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.10" - name: Install pip run: python -m pip install pip wheel requests - name: Install Hub Client Library diff --git a/.github/workflows/previews-build.yml b/.github/workflows/previews-build.yml index ee7a156c61b14..bbedf95bb2b21 100644 --- a/.github/workflows/previews-build.yml +++ b/.github/workflows/previews-build.yml @@ -45,7 +45,7 @@ jobs: - name: install dependencies uses: "gradio-app/gradio/.github/actions/install-all-deps@main" with: - python_version: "3.9" + python_version: "3.10" build_lite: "true" - name: Package Lite NPM package working-directory: js/lite diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index bcb10713201f5..56aea79ae2189 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -5,6 +5,7 @@ on: push: branches: - main + - 5.0-dev env: CI: true @@ -23,6 +24,7 @@ jobs: - name: install dependencies uses: "gradio-app/gradio/.github/actions/install-all-deps@main" with: + python_version: "3.10" skip_build: "false" - name: Build packages run: | diff --git a/.github/workflows/storybook-build.yml b/.github/workflows/storybook-build.yml index a8538cfdfe018..00a2415d119de 100644 --- a/.github/workflows/storybook-build.yml +++ b/.github/workflows/storybook-build.yml @@ -52,6 +52,7 @@ jobs: - name: install dependencies uses: "gradio-app/gradio/.github/actions/install-all-deps@main" with: + python_version: "3.10" skip_build: "true" - name: build client run: pnpm --filter @gradio/client build diff --git 
a/.github/workflows/test-functional-lite.yml b/.github/workflows/test-functional-lite.yml index f10499b7baeb4..c58ce3c432f77 100644 --- a/.github/workflows/test-functional-lite.yml +++ b/.github/workflows/test-functional-lite.yml @@ -48,6 +48,8 @@ jobs: with: build_lite: true skip_build: true + python_version: "3.10" + test: true - run: pnpm exec playwright install chromium firefox - name: Run Lite E2E tests run: | diff --git a/.github/workflows/test-functional.yml b/.github/workflows/test-functional.yml index 8fed9b2533312..b74aea46a23dc 100644 --- a/.github/workflows/test-functional.yml +++ b/.github/workflows/test-functional.yml @@ -34,24 +34,32 @@ jobs: filter: "functional" token: ${{ secrets.GITHUB_TOKEN }} test: + name: "functional-test-SSR=${{ matrix.SSR }}" permissions: contents: read - name: "functional-test" runs-on: ubuntu-latest needs: changes + strategy: + matrix: + ssr: [true, false] if: needs.changes.outputs.should_run == 'true' steps: - uses: actions/checkout@v4 - name: install dependencies id: install_deps uses: "gradio-app/gradio/.github/actions/install-all-deps@main" - - name: install outbreak_forecast dependencies + with: + python_version: "3.10" + test: true + - name: install dependencies for specific tests run: | . venv/bin/activate python -m pip install -r demo/outbreak_forecast/requirements.txt - python -m pip install -r demo/gradio_pdf_demo/requirements.txt + python -m pip install -r demo/stream_video_out/requirements.txt - run: pnpm exec playwright install chromium firefox - name: run browser tests + env: + GRADIO_SSR_MODE: ${{ matrix.ssr }} run: | . 
venv/bin/activate CI=1 pnpm test:browser diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 07c06e823474a..bb493865fc97d 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -59,7 +59,7 @@ jobs: id: install_deps uses: "gradio-app/gradio/.github/actions/install-all-deps@main" with: - python_version: "3.8" + python_version: "3.10" os: ${{ matrix.os }} test: true - name: Lint diff --git a/.github/workflows/website-docs-build.yml b/.github/workflows/website-docs-build.yml index edf4ee9a34279..69a43d30e2f9b 100644 --- a/.github/workflows/website-docs-build.yml +++ b/.github/workflows/website-docs-build.yml @@ -51,8 +51,9 @@ jobs: - name: install dependencies uses: "gradio-app/gradio/.github/actions/install-all-deps@main" with: - skip_build: true python_version: "3.10" + skip_build: true + test: true # generated when installing deps - name: upload website json artifacts diff --git a/.gitignore b/.gitignore index b69ad66caf7d2..8886c5414862e 100644 --- a/.gitignore +++ b/.gitignore @@ -31,10 +31,10 @@ js/gradio-preview/test/* *.db *.sqlite3 gradio/launches.json -flagged/ -gradio_cached_examples/ -tmp.zip gradio/hash_seed.txt +.gradio/ + +tmp.zip # Tests .coverage @@ -56,6 +56,9 @@ demo/annotatedimage_component/*.png demo/fake_diffusion_with_gif/*.gif demo/cancel_events/cancel_events_output_log.txt demo/unload_event_test/output_log.txt +demo/stream_video_out/output_*.ts +demo/stream_video_out/output_*.mp4 +demo/stream_audio_out/*.mp3 # Etc .idea/* diff --git a/CHANGELOG.md b/CHANGELOG.md index 1973f9fdc6daa..9cee66c08936a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,210 @@ # gradio +## 5.0.0-beta.10 + +### Fixes + +- [#9600](https://github.com/gradio-app/gradio/pull/9600) [`9f71086`](https://github.com/gradio-app/gradio/commit/9f71086036339bfdd14f3aab29729041a01fc2d4) - Ensure undo/try shows for final bot message in gr.Chatbot. Thanks @hannahblair! 
+ +## 5.0.0-beta.9 + +### Features + +- [#9437](https://github.com/gradio-app/gradio/pull/9437) [`c3d93be`](https://github.com/gradio-app/gradio/commit/c3d93bef94b9401747a363f7bad88a1d347d535b) - Adding new themes to Gradio 5.0. Thanks @allisonwhilden! +- [#9593](https://github.com/gradio-app/gradio/pull/9593) [`cc61fe7`](https://github.com/gradio-app/gradio/commit/cc61fe7047ac61779a61cce52c666400b9517daa) - Some more chatbot fixes. Thanks @dawoodkhan82! +- [#9583](https://github.com/gradio-app/gradio/pull/9583) [`b92a762`](https://github.com/gradio-app/gradio/commit/b92a7623e1ebd801587041e1ccca058a61058da9) - Disable the submit button and enter-key submit when the text is empty. Thanks @whitphx! +- [#9590](https://github.com/gradio-app/gradio/pull/9590) [`e853c41`](https://github.com/gradio-app/gradio/commit/e853c413583d91186aef3aceb0849d0ec0494834) - SSR e2e + fixes. Thanks @pngwn! +- [#9591](https://github.com/gradio-app/gradio/pull/9591) [`139152f`](https://github.com/gradio-app/gradio/commit/139152fe18bfdc5644a59d7bdfec9891b644f0bf) - Equal height in row false by default. Thanks @aliabid94! +- [#9589](https://github.com/gradio-app/gradio/pull/9589) [`477f45c`](https://github.com/gradio-app/gradio/commit/477f45cb43be957684eb392e3d62c09490c22391) - Only move files to the cache that have a meta key. Thanks @freddyaboulton! +- [#9584](https://github.com/gradio-app/gradio/pull/9584) [`6f8fa54`](https://github.com/gradio-app/gradio/commit/6f8fa5405528ad684084e4c7facfe36624fe7784) - Chat Interface Multimodal Fix & Fallback to `gr.Examples()`. Thanks @dawoodkhan82! +- [#9482](https://github.com/gradio-app/gradio/pull/9482) [`bd6c5f2`](https://github.com/gradio-app/gradio/commit/bd6c5f237b0631d86273c7684c3bf2b1011992a3) - Fix custom component CLI on main/5.0. Thanks @freddyaboulton! +- [#9601](https://github.com/gradio-app/gradio/pull/9601) [`c078892`](https://github.com/gradio-app/gradio/commit/c07889223cb64661b17560b707b977248809470a) - Tweak gr.Dataframe menu UX. 
Thanks @hannahblair! +- [#9575](https://github.com/gradio-app/gradio/pull/9575) [`4ec2feb`](https://github.com/gradio-app/gradio/commit/4ec2feb04e452d2c77482c09543c59948567be67) - Update gr.Dataframe UI with action popover. Thanks @hannahblair! +- [#9582](https://github.com/gradio-app/gradio/pull/9582) [`43a7f42`](https://github.com/gradio-app/gradio/commit/43a7f420d8ac34c7f7fa71d6e630a4c8618d3780) - Chatbot autoscroll. Thanks @whitphx! +- [#9598](https://github.com/gradio-app/gradio/pull/9598) [`ffc33fa`](https://github.com/gradio-app/gradio/commit/ffc33facaec1bcc92add5892afb86b7b5ba037d4) - Fix markdown code copy/check button in gr.Chatbot. Thanks @hannahblair! +- [#9576](https://github.com/gradio-app/gradio/pull/9576) [`430a26a`](https://github.com/gradio-app/gradio/commit/430a26a4fbcbabb5e9ddb6173bf658a00960e88e) - Fix reload mode. Thanks @freddyaboulton! +- [#9580](https://github.com/gradio-app/gradio/pull/9580) [`a9ac396`](https://github.com/gradio-app/gradio/commit/a9ac396f19218eafc441b7614289b4828cde853d) - Deep equal check with hash. Thanks @aliabid94! +- [#9499](https://github.com/gradio-app/gradio/pull/9499) [`17e6c84`](https://github.com/gradio-app/gradio/commit/17e6c84d6b11651cd03c1d47caec85de62030ea0) - Fix `gr.Chatbot` panels layout. Thanks @hannahblair! +- [#9592](https://github.com/gradio-app/gradio/pull/9592) [`24fe222`](https://github.com/gradio-app/gradio/commit/24fe222fd17583d04dd31aebf60b649224e8382f) - Fix favicon in ssr mode. Thanks @freddyaboulton! + +## 5.0.0-beta.8 + +### Features + +- [#9550](https://github.com/gradio-app/gradio/pull/9550) [`b0fedd7`](https://github.com/gradio-app/gradio/commit/b0fedd7ef718c0df797ec277db7e773543a70a4d) - Fix most flaky Python tests in `5.0-dev` branch. Thanks @abidlabs! +- [#9577](https://github.com/gradio-app/gradio/pull/9577) [`9f532e0`](https://github.com/gradio-app/gradio/commit/9f532e03a6b91b9a5592152c19b9b2611774cae7) - Equal height columns. Thanks @aliabid94! 
+- [#9570](https://github.com/gradio-app/gradio/pull/9570) [`e0ee3d5`](https://github.com/gradio-app/gradio/commit/e0ee3d5bb1020744cefa1abf66009fbf07da8cbe) - Update gr.ColorPicker UI. Thanks @hannahblair! +- [#9483](https://github.com/gradio-app/gradio/pull/9483) [`8dc7c12`](https://github.com/gradio-app/gradio/commit/8dc7c12389311b60efcde1b9d3e3668a34d2dc00) - Send Streaming data over Websocket if possible. Also support base64 output format for images. Thanks @freddyaboulton! +- [#9521](https://github.com/gradio-app/gradio/pull/9521) [`06ef22e`](https://github.com/gradio-app/gradio/commit/06ef22e83cdd27e7afb381396d153d9db3dea16e) - Allow `info=` to render markdown. Thanks @dawoodkhan82! +- [#9571](https://github.com/gradio-app/gradio/pull/9571) [`148345d`](https://github.com/gradio-app/gradio/commit/148345d107763754710505281ad70368ebc6f3ec) - Fix chatinterface embedding height issues. Thanks @aliabid94! +- [#9525](https://github.com/gradio-app/gradio/pull/9525) [`7c367b6`](https://github.com/gradio-app/gradio/commit/7c367b6cf0472d478671b7c7476e892b4c61c812) - Fix cut off in gr.ImageEditor. Thanks @hannahblair! +- [#9522](https://github.com/gradio-app/gradio/pull/9522) [`3b71ed2`](https://github.com/gradio-app/gradio/commit/3b71ed21b7e2ecb67eb68fb946d25565169cb4df) - Api info fix. Thanks @freddyaboulton! +- [#9508](https://github.com/gradio-app/gradio/pull/9508) [`b260389`](https://github.com/gradio-app/gradio/commit/b26038932a64f024bd149a56b1539e9e75802f29) - Change caching to occur not at the creation of a `gr.Examples()` but when the Blocks is actually launched. Thanks @aliabid94! +- [#9524](https://github.com/gradio-app/gradio/pull/9524) [`cf39640`](https://github.com/gradio-app/gradio/commit/cf396404ec8052c7fbf5f045955028bc54a274a5) - Add `css_paths` and `head_paths` parameters. Thanks @abidlabs! 
+ +## 5.0.0-beta.7 + +### Features + +- [#9546](https://github.com/gradio-app/gradio/pull/9546) [`b82aa6f`](https://github.com/gradio-app/gradio/commit/b82aa6f115f6ad9a9690bdecb0fe63c162dc75c0) - Disable sagemaker_check() for now. Thanks @vmatt! +- [#9545](https://github.com/gradio-app/gradio/pull/9545) [`098a009`](https://github.com/gradio-app/gradio/commit/098a009c05243967bd9e5acc28864eb40a135f6a) - Add Jinja2 language to Code component. Thanks @CISC! +- [#9526](https://github.com/gradio-app/gradio/pull/9526) [`f60bb68`](https://github.com/gradio-app/gradio/commit/f60bb68f52cd0863d9087b3fbc56635e297adef1) - Fix single select dropdown. Thanks @whitphx! +- [#9497](https://github.com/gradio-app/gradio/pull/9497) [`d826faa`](https://github.com/gradio-app/gradio/commit/d826faa8c2584cf0772d4e66b4073e33b83f3a00) - Hide x axis labels. Thanks @aliabid94! + +## 5.0.0-beta.6 + +### Features + +- [#9460](https://github.com/gradio-app/gradio/pull/9460) [`7352a89`](https://github.com/gradio-app/gradio/commit/7352a89722da91461c32fd33588531f3edce9c48) - Playground requirements tab. Thanks @whitphx! +- [#9496](https://github.com/gradio-app/gradio/pull/9496) [`1647ebd`](https://github.com/gradio-app/gradio/commit/1647ebddc3e2ed6fc143a62629409e32afcc5801) - UI theme fixes. Thanks @aliabid94! +- [#9450](https://github.com/gradio-app/gradio/pull/9450) [`991883e`](https://github.com/gradio-app/gradio/commit/991883e217dc0a3512b3ae3245378812f373b8db) - Improve `gr.Code`. Thanks @hannahblair! +- [#9504](https://github.com/gradio-app/gradio/pull/9504) [`d054262`](https://github.com/gradio-app/gradio/commit/d054262f611d5f1eb1a1c936db7152347a891f8e) - Centre components within `Block` when height and width are set. Thanks @hannahblair! +- [#9481](https://github.com/gradio-app/gradio/pull/9481) [`2510a6e`](https://github.com/gradio-app/gradio/commit/2510a6e978a49432d7820e9518f164a70cf8acc8) - Fix `slider-color` var. Thanks @hannahblair! 
+- [#9495](https://github.com/gradio-app/gradio/pull/9495) [`488ef76`](https://github.com/gradio-app/gradio/commit/488ef768ccc5008401f7e0aa4c357b93311190ff) - Fix custom component CLI unit tests. Thanks @freddyaboulton! +- [#9488](https://github.com/gradio-app/gradio/pull/9488) [`4e6a47f`](https://github.com/gradio-app/gradio/commit/4e6a47f5a29cb885d5bc01a79ca4cc45d298f0b1) - Fixes: Chatbot examples for custom chatbot + rename `suggestions` -> `examples`. Thanks @dawoodkhan82! +- [#9506](https://github.com/gradio-app/gradio/pull/9506) [`861f5e9`](https://github.com/gradio-app/gradio/commit/861f5e97ffde5f59e42cfa213364f19e84d799fd) - Fix node process to run with correct server name. Thanks @abidlabs! +- [#9493](https://github.com/gradio-app/gradio/pull/9493) [`c307a0c`](https://github.com/gradio-app/gradio/commit/c307a0c9b81b66bde21f0af4a9f7d5726ea7a30d) - Minor fixes to docs and a demo. Thanks @abidlabs! +- [#9519](https://github.com/gradio-app/gradio/pull/9519) [`0ab6ac5`](https://github.com/gradio-app/gradio/commit/0ab6ac5dc01b69e4f2462d00c4910f3354441227) - Fix change triggers for dropdown and radio. Thanks @dawoodkhan82! + +### Fixes + +- [#9431](https://github.com/gradio-app/gradio/pull/9431) [`7065e11`](https://github.com/gradio-app/gradio/commit/7065e11e465fcdfe14688bd6ca2aeed0a25fcc36) - Check for `file_types` parameter in the backend. Thanks @dawoodkhan82! + +## 5.0.0-beta.5 + +### Features + +- [#9470](https://github.com/gradio-app/gradio/pull/9470) [`b406139`](https://github.com/gradio-app/gradio/commit/b40613928c0e0d224ff2b4db5d0b45727c178560) - Add support for 3rd party providers to `gr.load`, and provide a better UX for conversational models. Thanks @abidlabs! +- [#9383](https://github.com/gradio-app/gradio/pull/9383) [`30d13ac`](https://github.com/gradio-app/gradio/commit/30d13ac8a932d53abfd236d3e4e845570480e701) - Pre/post-processing download requests. Thanks @aliabid94! 
+- [#9464](https://github.com/gradio-app/gradio/pull/9464) [`3ac5d9c`](https://github.com/gradio-app/gradio/commit/3ac5d9c972576d82bc365a6532e6e12f55441a30) - Fix plots. Thanks @pngwn! + +## 5.0.0-beta.4 + +### Features + +- [#9419](https://github.com/gradio-app/gradio/pull/9419) [`018c140`](https://github.com/gradio-app/gradio/commit/018c140ef86cacc8211df05b57b26924dab7fa08) - Start/stop recording from the backend. Add guide on conversational chatbots. Thanks @freddyaboulton! +- [#9453](https://github.com/gradio-app/gradio/pull/9453) [`56dbf77`](https://github.com/gradio-app/gradio/commit/56dbf77671012015efd3c745bc33e5074ab7158f) - Chatbot bug fixes. Thanks @dawoodkhan82! +- [#9448](https://github.com/gradio-app/gradio/pull/9448) [`e7a415b`](https://github.com/gradio-app/gradio/commit/e7a415b1ef923d31754b302b3cd8b5e71a9ea446) - Use `str` or `pathlib.Path` objects to indicate filepaths for `css`, `js`, and `head` parameters. Thanks @abidlabs! +- [#9469](https://github.com/gradio-app/gradio/pull/9469) [`f7c3396`](https://github.com/gradio-app/gradio/commit/f7c3396f55a5b8364d3880a29d766bd092d7f840) - Fix. Triggered dataframe change event for header change. Thanks @Joodith! +- [#9447](https://github.com/gradio-app/gradio/pull/9447) [`afbd8e7`](https://github.com/gradio-app/gradio/commit/afbd8e7a2faadeea5a431f67a753dea14dd5829f) - Reduce analytics that are collected. Thanks @abidlabs! +- [#9438](https://github.com/gradio-app/gradio/pull/9438) [`8f469e1`](https://github.com/gradio-app/gradio/commit/8f469e1d1d9d1636e4dedfb1c09e76a5e2ba8d4e) - Small changes to caching. Thanks @abidlabs! +- [#9446](https://github.com/gradio-app/gradio/pull/9446) [`0c8fafb`](https://github.com/gradio-app/gradio/commit/0c8fafb31df7ef3ef5812d6efb47ca342a3bad3c) - Fix SSR mode flag with `mount_gradio_app` and revert changes to pytests. Thanks @abidlabs! 
+- [#9456](https://github.com/gradio-app/gradio/pull/9456) [`4d75f02`](https://github.com/gradio-app/gradio/commit/4d75f029aacf0561f50c7afebbe8d54da2cb0af4) - Update object detection guide. Thanks @freddyaboulton! +- [#9406](https://github.com/gradio-app/gradio/pull/9406) [`74f3b9d`](https://github.com/gradio-app/gradio/commit/74f3b9ded1ce0ff6f2f0dbcb113edd64b7bb8f60) - Allow skipping an arbitrary number of output components, and also raise a warning if the number of output components does not match the number of values returned from a function. Thanks @abidlabs! +- [#9413](https://github.com/gradio-app/gradio/pull/9413) [`a16787a`](https://github.com/gradio-app/gradio/commit/a16787abdc8d507d75b986dbe882d21be4540908) - Lite: HTTPX client improvement. Thanks @whitphx! + +## 5.0.0-beta.3 + +### Features + +- [#9376](https://github.com/gradio-app/gradio/pull/9376) [`d92c26f`](https://github.com/gradio-app/gradio/commit/d92c26fe63f6b88e16c356cb84c55b61f795db73) - Small fixes to `gr.Dataframe` and chatbot docs. Thanks @abidlabs! +- [#9412](https://github.com/gradio-app/gradio/pull/9412) [`c2c2fd9`](https://github.com/gradio-app/gradio/commit/c2c2fd989348f826566773c07c0e0bda200199ff) - fix SSR apps on spaces. Thanks @pngwn! + +### Fixes + +- [#9405](https://github.com/gradio-app/gradio/pull/9405) [`bf27ff4`](https://github.com/gradio-app/gradio/commit/bf27ff4ac8ada33ea03dd26d5c1c1115aa1f318a) - Center icon in button when no text is present. Thanks @abidlabs! + +## 5.0.0-beta.2 + +### Features + +- [#9359](https://github.com/gradio-app/gradio/pull/9359) [`50c3a7f`](https://github.com/gradio-app/gradio/commit/50c3a7f1541f632853a96f3d979ebeef6ad82869) - Small tweak to how thoughts are shown in `gr.Chatbot`. Thanks @abidlabs! +- [#9323](https://github.com/gradio-app/gradio/pull/9323) [`06babda`](https://github.com/gradio-app/gradio/commit/06babda0395fd3fbd323c1c3cb33704ecfd6deb0) - Disable liking user message in chatbot by default but make it configurable. 
Thanks @freddyaboulton! +- [#8966](https://github.com/gradio-app/gradio/pull/8966) [`8e52b6a`](https://github.com/gradio-app/gradio/commit/8e52b6a3e75957462bc7fdbf6ff9c280084d5f08) - Chatbot Examples. Thanks @dawoodkhan82! +- [#9261](https://github.com/gradio-app/gradio/pull/9261) [`73647a0`](https://github.com/gradio-app/gradio/commit/73647a07b0439efabe3dd218ff6c366ffa3b84a0) - Move icons into `IconButtonWrapper`. Thanks @hannahblair! +- [#9316](https://github.com/gradio-app/gradio/pull/9316) [`4338f29`](https://github.com/gradio-app/gradio/commit/4338f29bce2430d765f20070d1823ecc19d940cb) - 9227 chatinterface retry bug. Thanks @freddyaboulton! +- [#9313](https://github.com/gradio-app/gradio/pull/9313) [`1fef9d9`](https://github.com/gradio-app/gradio/commit/1fef9d9a26f0ebce4de18c486702661f6539b1c6) - Standardize `height` across components and add `max_height` and `min_height` parameters where appropriate. Thanks @abidlabs! +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Ssr part 2. Thanks @pngwn! +- [#9250](https://github.com/gradio-app/gradio/pull/9250) [`350b0a5`](https://github.com/gradio-app/gradio/commit/350b0a5cafb9176f914f62e7c90de51d4352cc77) - Improve Icon Button consistency. Thanks @hannahblair! +- [#9269](https://github.com/gradio-app/gradio/pull/9269) [`e05f568`](https://github.com/gradio-app/gradio/commit/e05f568f47e9fa33ef91dbbe5cc477d32762bc36) - Fix reload mode and streaming in 5.0 dev. Thanks @freddyaboulton! +- [#9356](https://github.com/gradio-app/gradio/pull/9356) [`1daf259`](https://github.com/gradio-app/gradio/commit/1daf259b52d0b1ce16d916ff25a15d322b51ecf5) - Use `container` param in `gr.Markdown`. Thanks @hannahblair! 
+- [#9321](https://github.com/gradio-app/gradio/pull/9321) [`81a356d`](https://github.com/gradio-app/gradio/commit/81a356d802f95b6a9a7aeb3759e05e47febbd0d3) - Remove two dependencies: `importlib_resources` and `urllib3` (if not in Wasm). Thanks @abidlabs! +- [#9253](https://github.com/gradio-app/gradio/pull/9253) [`99648ec`](https://github.com/gradio-app/gradio/commit/99648ec7c4443e74799941e47b0015ac9ca581e1) - Adds ability to block event trigger when file is uploading. Thanks @dawoodkhan82! +- [#9341](https://github.com/gradio-app/gradio/pull/9341) [`02369b3`](https://github.com/gradio-app/gradio/commit/02369b3159df72b2f4a36ce5684574eb65065731) - Improve is_in_or_equal and fuzzer. Thanks @freddyaboulton! +- [#9333](https://github.com/gradio-app/gradio/pull/9333) [`5b86e2f`](https://github.com/gradio-app/gradio/commit/5b86e2f2a2bedcde79e425fd470473bc1fd6ae2e) - Enhance Lite E2E tests and fix a networking problem on Lite. Thanks @whitphx! +- [#9338](https://github.com/gradio-app/gradio/pull/9338) [`19f6b31`](https://github.com/gradio-app/gradio/commit/19f6b31a73c6114093cbb5a7e69131175efa8a79) - Fix typo in `tunneling.py`. Thanks @abidlabs! +- [#9336](https://github.com/gradio-app/gradio/pull/9336) [`736046f`](https://github.com/gradio-app/gradio/commit/736046f17db073b56023b5e077b0ae5ae4adeb02) - Object Detection From Webcam Stream Guide. Thanks @freddyaboulton! +- [#9300](https://github.com/gradio-app/gradio/pull/9300) [`6309a48`](https://github.com/gradio-app/gradio/commit/6309a48e3a89a13137ec9d61c1c722eb59b8e3dc) - Raise ChecksumMismatchError. Thanks @abidlabs! +- [#9373](https://github.com/gradio-app/gradio/pull/9373) [`6443062`](https://github.com/gradio-app/gradio/commit/64430620449ab5b19ea32b02ab82a2d1804dcb2e) - Fix Cached Examples for Streamed Media. Thanks @freddyaboulton! 
+- [#9367](https://github.com/gradio-app/gradio/pull/9367) [`1c94328`](https://github.com/gradio-app/gradio/commit/1c94328cfe6ce0676c3850f5e9da5bcabf9ee570) - add local fonts and update themes. Thanks @hannahblair! +- [#9335](https://github.com/gradio-app/gradio/pull/9335) [`b543465`](https://github.com/gradio-app/gradio/commit/b543465d06d7d1b399c4d0755da05e022611a97f) - Remove lite/theme.css from the Git-managed file tree. Thanks @whitphx! +- [#9358](https://github.com/gradio-app/gradio/pull/9358) [`16c0485`](https://github.com/gradio-app/gradio/commit/16c0485a32be324a5f1c7252f5ce09fff79f7d67) - Small tweaks to improve the DX for the "tuples"/"messages" argument in `gr.Chatbot`. Thanks @abidlabs! +- [#9303](https://github.com/gradio-app/gradio/pull/9303) [`34f46b0`](https://github.com/gradio-app/gradio/commit/34f46b0512fe30b4db9c9901cb23987d3cecc48d) - Dont move files to cache automatically in chatbot postprocess. Thanks @freddyaboulton! +- [#9363](https://github.com/gradio-app/gradio/pull/9363) [`3ad28c7`](https://github.com/gradio-app/gradio/commit/3ad28c7e310e8589e0c53b7efee8031e129bece8) - Prevent HTML and Markdown height changing when status is hidden. Thanks @hannahblair! +- [#9260](https://github.com/gradio-app/gradio/pull/9260) [`d47dd1f`](https://github.com/gradio-app/gradio/commit/d47dd1f8417a878ef731d2eeabf60f3069289dee) - Fix overflowing markdown in Chatbot. Thanks @hannahblair! +- [#9320](https://github.com/gradio-app/gradio/pull/9320) [`98cbcae`](https://github.com/gradio-app/gradio/commit/98cbcaef827de7267462ccba180c7b2ffb1e825d) - chore: fix docs style. Thanks @imba-tjd! +- [#9314](https://github.com/gradio-app/gradio/pull/9314) [`299879d`](https://github.com/gradio-app/gradio/commit/299879d02adf3bacb012c76a467aaf5df5b31493) - Make `gr.Image` preprocessing more efficient. Thanks @abidlabs! 
+- [#9371](https://github.com/gradio-app/gradio/pull/9371) [`7bf3e99`](https://github.com/gradio-app/gradio/commit/7bf3e9989392b7edcdc18c1d840fb8130b15040e) - Fix `gr.ImageEditor` toolbar cutoff. Thanks @hannahblair! +- [#9306](https://github.com/gradio-app/gradio/pull/9306) [`f3f0fef`](https://github.com/gradio-app/gradio/commit/f3f0fef199c7779aac9aaef794dd4af1861ce50f) - Fixes race condition in `update_root_in_config`. Thanks @abidlabs! +- [#9312](https://github.com/gradio-app/gradio/pull/9312) [`7c0780b`](https://github.com/gradio-app/gradio/commit/7c0780b5677f8a1c05b9d2eee136e982917829b8) - Proposal: remove `gr.make_waveform` and remove `matplotlib` as a dependency. Thanks @abidlabs! +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Tweaks to SSR mode. Thanks @pngwn! +- [#9270](https://github.com/gradio-app/gradio/pull/9270) [`b0b8500`](https://github.com/gradio-app/gradio/commit/b0b850081d8d10c1287b5d179b8db37482e21c8d) - Fix stop recording button colors. Thanks @freddyaboulton! +- [#9268](https://github.com/gradio-app/gradio/pull/9268) [`c469d40`](https://github.com/gradio-app/gradio/commit/c469d40b0d9d807abb9fa92c67069c08833ce6bc) - Raise error instead of warning if checksums for binary do not match. Thanks @abidlabs! +- [#9377](https://github.com/gradio-app/gradio/pull/9377) [`618e9fe`](https://github.com/gradio-app/gradio/commit/618e9fe941744cef4a4a06eb7840763c64b15e32) - Update babylon.js to `v7` for `gr.Model3D`. Thanks @abidlabs! +- [#9282](https://github.com/gradio-app/gradio/pull/9282) [`54ea485`](https://github.com/gradio-app/gradio/commit/54ea485ba92165be96137ae35e2d3f2fc62a2873) - Further tweak to is_in_or_equal. Thanks @freddyaboulton! +- [#9326](https://github.com/gradio-app/gradio/pull/9326) [`7afb9a1`](https://github.com/gradio-app/gradio/commit/7afb9a14fa64310eb8b70f43a3bad373e46e36c1) - 5.0 merge take 2. Thanks @pngwn! 
+- [#9280](https://github.com/gradio-app/gradio/pull/9280) [`7122420`](https://github.com/gradio-app/gradio/commit/712242047fde3a594dfde7f48a44c7ea16239dc8) - Match style of textbox stop button to submit button. Thanks @freddyaboulton! +- [#9348](https://github.com/gradio-app/gradio/pull/9348) [`61f794b`](https://github.com/gradio-app/gradio/commit/61f794bba78ef59e55beca0ba743548f33f3a3c3) - Do not attach `content_disposition_type = "attachment"` headers for files explicitly allowed by developer. Thanks @abidlabs! +- [#9361](https://github.com/gradio-app/gradio/pull/9361) [`5eb860f`](https://github.com/gradio-app/gradio/commit/5eb860f739a187217ded1fc569676e0edd16bab0) - Refactor lazy caching. Thanks @abidlabs! +- [#9311](https://github.com/gradio-app/gradio/pull/9311) [`c4afdcd`](https://github.com/gradio-app/gradio/commit/c4afdcdb1f1f80c5f95ab45d527236e9364ace82) - Added max lines and overflow scrollbar for `gr.Code`. Thanks @micpst! + +### Fixes + +- [#9299](https://github.com/gradio-app/gradio/pull/9299) [`aa35b07`](https://github.com/gradio-app/gradio/commit/aa35b0788e613fdd45446d267513e6f94fa208ea) - Trigger state change event on iterators. Thanks @freddyaboulton! +- [#9393](https://github.com/gradio-app/gradio/pull/9393) [`53ed0f0`](https://github.com/gradio-app/gradio/commit/53ed0f030551ad876a1fea28a9db9015ba6ec33e) - Fix File Types for MultimodalTextbox. Thanks @dawoodkhan82! +- [#9328](https://github.com/gradio-app/gradio/pull/9328) [`6a7f631`](https://github.com/gradio-app/gradio/commit/6a7f63180b4105622298dd742d6a0d25216ea629) - Set the color of placeholder in a disabled textbox to gray instead of black, and disable typing while a response is generating in `gr.ChatInterface`, allow `gr.MultimodalTextbox` to accept string values. Thanks @abidlabs! 
+ +## 5.0.0-beta.1 + +### Features + +- [#9235](https://github.com/gradio-app/gradio/pull/9235) [`f8b411f`](https://github.com/gradio-app/gradio/commit/f8b411fe282ff0316ed4abebc0a043b044bf4dd9) - Built-in submit and stop buttons in `gr.ChatInterface(multimodal=False)`, adding `submit_btn` and `stop_btn` props to `gr.Textbox()` and `gr.MultimodalText()`. Thanks @whitphx! +- [#9201](https://github.com/gradio-app/gradio/pull/9201) [`5492e74`](https://github.com/gradio-app/gradio/commit/5492e742b1f1fa618208cce523f50ad22a6e86f1) - Move buttons from chat_interface into Chatbot. Thanks @freddyaboulton! +- [#9199](https://github.com/gradio-app/gradio/pull/9199) [`3175c7a`](https://github.com/gradio-app/gradio/commit/3175c7aebc6ad2466d31d6949580f5a3cb4cd698) - Redesign `gr.Tabs()`. Thanks @hannahblair! +- [#9167](https://github.com/gradio-app/gradio/pull/9167) [`e9e737e`](https://github.com/gradio-app/gradio/commit/e9e737eeeb61d0bbf43277c75b6ffed8b34aa445) - Redesign `gr.Button()`. Thanks @hannahblair! +- [#9218](https://github.com/gradio-app/gradio/pull/9218) [`4a832f4`](https://github.com/gradio-app/gradio/commit/4a832f4b0a8f35a10bc2301a56b711519e85034b) - Adds TLS to FRP tunnel. Thanks @abidlabs! +- [#9166](https://github.com/gradio-app/gradio/pull/9166) [`8a75559`](https://github.com/gradio-app/gradio/commit/8a755596317c59bfb10803edc0f8642e62d7cecd) - Minor changes to flagging for 5.0. Thanks @abidlabs! +- [#9254](https://github.com/gradio-app/gradio/pull/9254) [`03f3735`](https://github.com/gradio-app/gradio/commit/03f3735fba1fd4f1978b5431af9e67de3b6e7945) - Adds a "huggingface" button variant, and makes it the default for `gr.LoginButton` and `gr.DuplicateButton`. Thanks @abidlabs! +- [#9187](https://github.com/gradio-app/gradio/pull/9187) [`5bf00b7`](https://github.com/gradio-app/gradio/commit/5bf00b7524ebf399b48719120a49d15bb21bd65c) - make all component SSR compatible. Thanks @pngwn! 
+- [#9236](https://github.com/gradio-app/gradio/pull/9236) [`dd8e2e3`](https://github.com/gradio-app/gradio/commit/dd8e2e32c6c1ec42e13c55af870d0da291117dd3) - Improve button consistency across light/dark mode. Thanks @hannahblair! +- [#9225](https://github.com/gradio-app/gradio/pull/9225) [`5f2e047`](https://github.com/gradio-app/gradio/commit/5f2e047c2ce114cebc95d5dba16c4df10fa73eb1) - Add a 'None' option to the gradio.Image component to disable image_m…. Thanks @GeeMoose! +- [#9204](https://github.com/gradio-app/gradio/pull/9204) [`3c73f00`](https://github.com/gradio-app/gradio/commit/3c73f00e3016b16917ebfe0bad390f2dff683457) - 🔡 Update default core Gradio font. Thanks @hannahblair! +- [#9245](https://github.com/gradio-app/gradio/pull/9245) [`c8cfe93`](https://github.com/gradio-app/gradio/commit/c8cfe93c0971d904c29da60410952fd20c9439c0) - Lighten secondary button grey fill. Thanks @hannahblair! +- [#9246](https://github.com/gradio-app/gradio/pull/9246) [`38cf712`](https://github.com/gradio-app/gradio/commit/38cf71234bf57fe9da6eea2d32b1d6e7ef35c700) - Stop using `multiprocessing` in `flagging.CSVLogger` on Lite v5. Thanks @whitphx! +- [#9216](https://github.com/gradio-app/gradio/pull/9216) [`e137b30`](https://github.com/gradio-app/gradio/commit/e137b30b1a53ca32d3cd809d31e97d5d54a4e479) - Decrease component radii and remove input shadows. Thanks @hannahblair! +- [#9200](https://github.com/gradio-app/gradio/pull/9200) [`2e179d3`](https://github.com/gradio-app/gradio/commit/2e179d35be6ed60a5a6bfc7303178d63e41781ad) - prefix api routes. Thanks @pngwn! + +## 5.0.0-beta.0 + +### Features + +- [#9069](https://github.com/gradio-app/gradio/pull/9069) [`f9f84bf`](https://github.com/gradio-app/gradio/commit/f9f84bfe7064634164501d1023591b415ad2a03b) - No token passed by default in `gr.load()`. Thanks @abidlabs! 
+- [#9160](https://github.com/gradio-app/gradio/pull/9160) [`8f5a895`](https://github.com/gradio-app/gradio/commit/8f5a8950c949996f7c439b11a7aa40edda3e8562) - Fix native plot lite demos. Thanks @aliabd! +- [#9197](https://github.com/gradio-app/gradio/pull/9197) [`6773c4d`](https://github.com/gradio-app/gradio/commit/6773c4da22f957a11b2a07f032ce13c7b4c94f8c) - Redesign `gr.Slider()`. Thanks @hannahblair! +- [#9140](https://github.com/gradio-app/gradio/pull/9140) [`c054ec8`](https://github.com/gradio-app/gradio/commit/c054ec85e49ab102b15afd305583ee394151d16c) - Drop python 3.8 and 3.9. Thanks @abidlabs! +- [#8978](https://github.com/gradio-app/gradio/pull/8978) [`fe9d1cb`](https://github.com/gradio-app/gradio/commit/fe9d1cb0870a5b07d51e8cb05401af47efbacd13) - Improve url downloads for file objects. Thanks @aliabid94! +- [#8810](https://github.com/gradio-app/gradio/pull/8810) [`4cf8af9`](https://github.com/gradio-app/gradio/commit/4cf8af9407a44ee914e0be567da38b29f00eff8e) - Prevent invalid values from being submitted to dropdown, etc. Thanks @abidlabs! +- [#9194](https://github.com/gradio-app/gradio/pull/9194) [`20c0836`](https://github.com/gradio-app/gradio/commit/20c0836ed0e0698dbc81d2a4bda04363fd857334) - Deprecate type='tuples for chatbot and focus chatbot docs on 'messages' type. Thanks @freddyaboulton! +- [#9122](https://github.com/gradio-app/gradio/pull/9122) [`2672ea2`](https://github.com/gradio-app/gradio/commit/2672ea297ef28414ecf2eeab7984b1e4b4ee40b8) - Postprocess hardening. Thanks @freddyaboulton! +- [#9149](https://github.com/gradio-app/gradio/pull/9149) [`3d7a9b8`](https://github.com/gradio-app/gradio/commit/3d7a9b81f6fef06187eca832471dc1692eb493a0) - Open audio/image input stream only when queue is ready. Thanks @freddyaboulton! +- [#9173](https://github.com/gradio-app/gradio/pull/9173) [`66349fe`](https://github.com/gradio-app/gradio/commit/66349fe26827e3a3c15b738a1177e95fec7f5554) - Streaming Guides. Thanks @freddyaboulton! 
+- [#9185](https://github.com/gradio-app/gradio/pull/9185) [`2daf3d1`](https://github.com/gradio-app/gradio/commit/2daf3d10f5986675f6ceb75ebb50c9d991c282bf) - Adding `maxlength` attribute handling of `textarea` and `input` HTML element for the `gr.TextBox()` component via a `max_length` parameter. Thanks @WH-Yoshi! +- [#8959](https://github.com/gradio-app/gradio/pull/8959) [`a0aac66`](https://github.com/gradio-app/gradio/commit/a0aac6694076529ff925ccd34b3503d35e86cb49) - Adds `strict_cors` parameter to `launch()`. Thanks @abidlabs! +- [#9052](https://github.com/gradio-app/gradio/pull/9052) [`f3652eb`](https://github.com/gradio-app/gradio/commit/f3652ebe08211e12739df73c15fd97e5ff81276a) - Video gallery. Thanks @dawoodkhan82! +- [#9213](https://github.com/gradio-app/gradio/pull/9213) [`ab4580b`](https://github.com/gradio-app/gradio/commit/ab4580bd5f755a07c9a9bd2a775220a9a2085f8c) - Remove grey background behind all components. Thanks @hannahblair! +- [#9073](https://github.com/gradio-app/gradio/pull/9073) [`0d8a358`](https://github.com/gradio-app/gradio/commit/0d8a358cc86331aa0c83380326b30d04597f9ef9) - Set default `format` in `gr.Audio` to be `None` to avoid unnecessary preprocessing. Thanks @abidlabs! +- [#9130](https://github.com/gradio-app/gradio/pull/9130) [`864cd0f`](https://github.com/gradio-app/gradio/commit/864cd0fd6aa85691b53bd0bf3a50af05b778813c) - Raise WasmUnsupportedError for ffmpeg usage on Lite. Thanks @whitphx! +- [#8797](https://github.com/gradio-app/gradio/pull/8797) [`6e6818c`](https://github.com/gradio-app/gradio/commit/6e6818c3af836051fffdd070a9e33889b246186e) - Deprecate for 5.0. Thanks @abidlabs! +- [#9132](https://github.com/gradio-app/gradio/pull/9132) [`5cedf16`](https://github.com/gradio-app/gradio/commit/5cedf162f2120e30dd58bf3a8eab27115030b4f5) - Deprecate passing a tuple for gr.Code value. Thanks @freddyaboulton! 
+- [#8941](https://github.com/gradio-app/gradio/pull/8941) [`97a7bf6`](https://github.com/gradio-app/gradio/commit/97a7bf66a79179d1b91a3199d68e5c11216ca500) - Streaming inputs for 5.0. Thanks @freddyaboulton! +- [#9150](https://github.com/gradio-app/gradio/pull/9150) [`80c966a`](https://github.com/gradio-app/gradio/commit/80c966af6e3d947abe96058de9b683ecf05d9803) - DNS resolver on ip check. Thanks @aliabid94! +- [#9175](https://github.com/gradio-app/gradio/pull/9175) [`e6d456a`](https://github.com/gradio-app/gradio/commit/e6d456a9c3b7e80e0c9a16cd365288deff706635) - Change dark mode color theme from `gray` to `zinc`. Thanks @hannahblair! +- [#8884](https://github.com/gradio-app/gradio/pull/8884) [`3408dba`](https://github.com/gradio-app/gradio/commit/3408dba7560a17371be679d0f01564a5606dc90b) - replace ip addresses with machine-specific hashes. Thanks @abidlabs! + +### Fixes + +- [#9189](https://github.com/gradio-app/gradio/pull/9189) [`ab142ee`](https://github.com/gradio-app/gradio/commit/ab142ee13d19070b75b5eb03efcda7193b8993c2) - Fix serialization error in curl api. Thanks @freddyaboulton! 
+ ## 4.44.1 ### Features diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 56c70b5797e47..b0b66fd1acc42 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,7 +7,7 @@ More than 200 awesome developers have contributed to the `gradio` library, and w Prerequisites: -- [Python 3.8+](https://www.python.org/downloads/) +- [Python 3.10+](https://www.python.org/downloads/) - [Node.js v16.14+](https://nodejs.dev/en/download/package-manager/) (only needed if you are making changes to the frontend) - [pnpm 8.1+](https://pnpm.io/8.x/installation) (only needed if you are making changes to the frontend) diff --git a/README.md b/README.md index f9892c36971c1..d542d571376ed 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ [![gradio-ui](https://github.com/gradio-app/gradio/actions/workflows/tests-js.yml/badge.svg)](https://github.com/gradio-app/gradio/actions/workflows/tests-js.yml) [![PyPI](https://img.shields.io/pypi/v/gradio)](https://pypi.org/project/gradio/) [![PyPI downloads](https://img.shields.io/pypi/dm/gradio)](https://pypi.org/project/gradio/) -![Python version](https://img.shields.io/badge/python-3.8+-important) +![Python version](https://img.shields.io/badge/python-3.10+-important) [![Twitter follow](https://img.shields.io/twitter/follow/gradio?style=social&label=follow)](https://twitter.com/gradio) [Website](https://gradio.app) @@ -26,19 +26,19 @@ Gradio is an open-source Python package that allows you to quickly **build** a demo or web application for your machine learning model, API, or any arbitrary Python function. You can then **share** a link to your demo or web application in just a few seconds using Gradio's built-in sharing features. 
*No JavaScript, CSS, or web hosting experience needed!* - + -It just takes a few lines of Python to create a beautiful demo like the one above, so let's get started 💫 +It just takes a few lines of Python to create your own demo, so let's get started 💫 ### Installation -**Prerequisite**: Gradio requires [Python 3.8 or higher](https://www.python.org/downloads/) +**Prerequisite**: Gradio 5 requires [Python 3.10 or higher](https://www.python.org/downloads/) We recommend installing Gradio using `pip`, which is included by default in Python. Run this in your terminal or command prompt: ``` -pip install gradio +pip install --upgrade gradio ``` diff --git a/client/js/CHANGELOG.md b/client/js/CHANGELOG.md index b103a45965f7a..0ddf129a85b50 100644 --- a/client/js/CHANGELOG.md +++ b/client/js/CHANGELOG.md @@ -1,5 +1,41 @@ # @gradio/client +## 1.6.0-beta.4 + +### Features + +- [#9483](https://github.com/gradio-app/gradio/pull/9483) [`8dc7c12`](https://github.com/gradio-app/gradio/commit/8dc7c12389311b60efcde1b9d3e3668a34d2dc00) - Send Streaming data over Websocket if possible. Also support base64 output format for images. Thanks @freddyaboulton! + +## 1.6.0-beta.3 + +### Features + +- [#9412](https://github.com/gradio-app/gradio/pull/9412) [`c2c2fd9`](https://github.com/gradio-app/gradio/commit/c2c2fd989348f826566773c07c0e0bda200199ff) - fix SSR apps on spaces. Thanks @pngwn! + +## 1.6.0-beta.2 + +### Features + +- [#9323](https://github.com/gradio-app/gradio/pull/9323) [`06babda`](https://github.com/gradio-app/gradio/commit/06babda0395fd3fbd323c1c3cb33704ecfd6deb0) - Disable liking user message in chatbot by default but make it configurable. Thanks @freddyaboulton! +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Ssr part 2. Thanks @pngwn! 
+ +### Fixes + +- [#9299](https://github.com/gradio-app/gradio/pull/9299) [`aa35b07`](https://github.com/gradio-app/gradio/commit/aa35b0788e613fdd45446d267513e6f94fa208ea) - Trigger state change event on iterators. Thanks @freddyaboulton! + +## 1.6.0-beta.1 + +### Features + +- [#9200](https://github.com/gradio-app/gradio/pull/9200) [`2e179d3`](https://github.com/gradio-app/gradio/commit/2e179d35be6ed60a5a6bfc7303178d63e41781ad) - prefix api routes. Thanks @pngwn! + +## 1.6.0-beta.0 + +### Features + +- [#9149](https://github.com/gradio-app/gradio/pull/9149) [`3d7a9b8`](https://github.com/gradio-app/gradio/commit/3d7a9b81f6fef06187eca832471dc1692eb493a0) - Open audio/image input stream only when queue is ready. Thanks @freddyaboulton! +- [#8941](https://github.com/gradio-app/gradio/pull/8941) [`97a7bf6`](https://github.com/gradio-app/gradio/commit/97a7bf66a79179d1b91a3199d68e5c11216ca500) - Streaming inputs for 5.0. Thanks @freddyaboulton! + ## 1.5.2 ### Fixes diff --git a/client/js/package.json b/client/js/package.json index 299964d47c02b..7d6c3302cf914 100644 --- a/client/js/package.json +++ b/client/js/package.json @@ -1,6 +1,6 @@ { "name": "@gradio/client", - "version": "1.5.2", + "version": "1.6.0-beta.4", "description": "Gradio API client", "type": "module", "main": "dist/index.js", diff --git a/client/js/src/client.ts b/client/js/src/client.ts index 21c09f7edf520..47058adb75522 100644 --- a/client/js/src/client.ts +++ b/client/js/src/client.ts @@ -32,13 +32,19 @@ import { } from "./helpers/init_helpers"; import { check_and_wake_space, check_space_status } from "./helpers/spaces"; import { open_stream, readable_stream, close_stream } from "./utils/stream"; -import { API_INFO_ERROR_MSG, CONFIG_ERROR_MSG } from "./constants"; +import { + API_INFO_ERROR_MSG, + CONFIG_ERROR_MSG, + HEARTBEAT_URL, + COMPONENT_SERVER_URL +} from "./constants"; export class Client { app_reference: string; options: ClientOptions; config: Config | undefined; + api_prefix = ""; 
api_info: ApiInfo | undefined; api_map: Record = {}; session_hash: string = Math.random().toString(36).substring(2); @@ -56,6 +62,8 @@ export class Client { heartbeat_event: EventSource | null = null; abort_controller: AbortController | null = null; stream_instance: EventSource | null = null; + current_payload: any; + ws_map: Record = {}; fetch(input: RequestInfo | URL, init?: RequestInit): Promise { const headers = new Headers(init?.headers || {}); @@ -130,7 +138,7 @@ export class Client { } this.options = options; - + this.current_payload = {}; this.view_api = view_api.bind(this); this.upload_files = upload_files.bind(this); this.handle_blob = handle_blob.bind(this); @@ -155,18 +163,14 @@ export class Client { global.WebSocket = ws.WebSocket as unknown as typeof WebSocket; } - try { - if (this.options.auth) { - await this.resolve_cookies(); - } - - await this._resolve_config().then(({ config }) => - this._resolve_hearbeat(config) - ); - } catch (e: any) { - throw Error(e); + if (this.options.auth) { + await this.resolve_cookies(); } + await this._resolve_config().then(({ config }) => + this._resolve_hearbeat(config) + ); + this.api_info = await this.view_api(); this.api_map = map_names_to_ids(this.config?.dependencies || []); } @@ -174,6 +178,8 @@ export class Client { async _resolve_hearbeat(_config: Config): Promise { if (_config) { this.config = _config; + this.api_prefix = _config.api_prefix || ""; + if (this.config && this.config.connect_heartbeat) { if (this.config.space_id && this.options.hf_token) { this.jwt = await get_jwt( @@ -192,7 +198,7 @@ export class Client { if (this.config && this.config.connect_heartbeat) { // connect to the heartbeat endpoint via GET request const heartbeat_url = new URL( - `${this.config.root}/heartbeat/${this.session_hash}` + `${this.config.root}${this.api_prefix}/${HEARTBEAT_URL}/${this.session_hash}` ); // if the jwt is available, add it to the query params @@ -222,6 +228,10 @@ export class Client { 
close_stream(this.stream_status, this.abort_controller); } + set_current_payload(payload: any): void { + this.current_payload = payload; + } + static async duplicate( app_reference: string, options: DuplicateOptions = { @@ -277,6 +287,7 @@ export class Client { _config: Config ): Promise { this.config = _config; + this.api_prefix = _config.api_prefix || ""; if (typeof window !== "undefined" && typeof document !== "undefined") { if (window.location.protocol === "https:") { @@ -306,6 +317,8 @@ export class Client { if (status.status === "running") { try { this.config = await this._resolve_config(); + this.api_prefix = this?.config?.api_prefix || ""; + if (!this.config) { throw new Error(CONFIG_ERROR_MSG); } @@ -385,12 +398,15 @@ export class Client { } try { - const response = await this.fetch(`${root_url}/component_server/`, { - method: "POST", - body: body, - headers, - credentials: "include" - }); + const response = await this.fetch( + `${root_url}${this.api_prefix}/${COMPONENT_SERVER_URL}/`, + { + method: "POST", + body: body, + headers, + credentials: "include" + } + ); if (!response.ok) { throw new Error( @@ -418,6 +434,60 @@ export class Client { component_server: this.component_server }; } + + private async connect_ws(url: string): Promise { + return new Promise((resolve, reject) => { + let ws; + try { + ws = new WebSocket(url); + } catch (e) { + this.ws_map[url] = "failed"; + return; + } + + ws.onopen = () => { + resolve(); + }; + + ws.onerror = (error) => { + console.error("WebSocket error:", error); + this.close_ws(url); + this.ws_map[url] = "failed"; + resolve(); + }; + + ws.onclose = () => { + delete this.ws_map[url]; + this.ws_map[url] = "failed"; + }; + + ws.onmessage = (event) => {}; + this.ws_map[url] = ws; + }); + } + + async send_ws_message(url: string, data: any): Promise { + // connect if not connected + if (!(url in this.ws_map)) { + await this.connect_ws(url); + } + const ws = this.ws_map[url]; + if (ws instanceof WebSocket) { + 
ws.send(JSON.stringify(data)); + } else { + this.post_data(url, data); + } + } + + async close_ws(url: string): Promise { + if (url in this.ws_map) { + const ws = this.ws_map[url]; + if (ws instanceof WebSocket) { + ws.close(); + delete this.ws_map[url]; + } + } + } } /** diff --git a/client/js/src/constants.ts b/client/js/src/constants.ts index 488ae760b110a..1dbce9880c314 100644 --- a/client/js/src/constants.ts +++ b/client/js/src/constants.ts @@ -1,20 +1,24 @@ // endpoints -export const HOST_URL = "host"; -export const API_URL = "api/predict/"; -export const SSE_URL_V0 = "queue/join"; -export const SSE_DATA_URL_V0 = "queue/data"; -export const SSE_URL = "queue/data"; -export const SSE_DATA_URL = "queue/join"; -export const UPLOAD_URL = "upload"; -export const LOGIN_URL = "login"; -export const CONFIG_URL = "config"; -export const API_INFO_URL = "info"; -export const RUNTIME_URL = "runtime"; -export const SLEEPTIME_URL = "sleeptime"; -export const RAW_API_INFO_URL = "info?serialize=False"; +export const HOST_URL = `host`; +export const API_URL = `predict/`; +export const SSE_URL_V0 = `queue/join`; +export const SSE_DATA_URL_V0 = `queue/data`; +export const SSE_URL = `queue/data`; +export const SSE_DATA_URL = `queue/join`; +export const UPLOAD_URL = `upload`; +export const LOGIN_URL = `login`; +export const CONFIG_URL = `config`; +export const API_INFO_URL = `info`; +export const RUNTIME_URL = `runtime`; +export const SLEEPTIME_URL = `sleeptime`; +export const HEARTBEAT_URL = `heartbeat`; +export const COMPONENT_SERVER_URL = `component_server`; +export const RESET_URL = `reset`; +export const CANCEL_URL = `cancel`; + +export const RAW_API_INFO_URL = `info?serialize=False`; export const SPACE_FETCHER_URL = "https://gradio-space-api-fetcher-v2.hf.space/api"; -export const RESET_URL = "reset"; export const SPACE_URL = "https://hf.space/{}"; // messages diff --git a/client/js/src/helpers/api_info.ts b/client/js/src/helpers/api_info.ts index 
fda8896872804..b3e5a655a2dc5 100644 --- a/client/js/src/helpers/api_info.ts +++ b/client/js/src/helpers/api_info.ts @@ -243,9 +243,11 @@ export function handle_message( | "log" | "none" | "heartbeat" + | "streaming" | "unexpected_error"; data?: any; status?: Status; + original_msg?: string; } { const queue = true; switch (data.msg) { @@ -313,10 +315,27 @@ export function handle_message( stage: data.success ? "generating" : "error", code: data.code, progress_data: data.progress_data, - eta: data.average_duration + eta: data.average_duration, + changed_state_ids: data.success + ? data.output.changed_state_ids + : undefined }, data: data.success ? data.output : null }; + case "process_streaming": + return { + type: "streaming", + status: { + queue, + message: data.output.error, + stage: "streaming", + time_limit: data.time_limit, + code: data.code, + progress_data: data.progress_data, + eta: data.eta + }, + data: data.output + }; case "process_completed": if ("error" in data.output) { return { @@ -358,7 +377,8 @@ export function handle_message( position: 0, success: data.success, eta: data.eta - } + }, + original_msg: "process_starts" }; } diff --git a/client/js/src/helpers/init_helpers.ts b/client/js/src/helpers/init_helpers.ts index f4945a5acaf3b..82b6fc1719d95 100644 --- a/client/js/src/helpers/init_helpers.ts +++ b/client/js/src/helpers/init_helpers.ts @@ -87,6 +87,7 @@ export async function resolve_config( return { ...config, path } as Config; } else if (endpoint) { const config_url = join_urls(endpoint, CONFIG_URL); + const response = await this.fetch(config_url, { headers, credentials: "include" @@ -204,10 +205,11 @@ export function determine_protocol(endpoint: string): { } // default to secure if no protocol is provided + return { ws_protocol: "wss", http_protocol: "https:", - host: endpoint + host: new URL(endpoint).host }; } diff --git a/client/js/src/index.ts b/client/js/src/index.ts index 7717d94f887c3..6ba30fc37a3e6 100644 --- a/client/js/src/index.ts +++ 
b/client/js/src/index.ts @@ -14,7 +14,8 @@ export type { UploadResponse, RenderMessage, LogMessage, - Payload + Payload, + Config } from "./types"; // todo: remove in @gradio/client v1.0 diff --git a/client/js/src/test/api_info.test.ts b/client/js/src/test/api_info.test.ts index 40e9d1f57fd5f..a717069723168 100644 --- a/client/js/src/test/api_info.test.ts +++ b/client/js/src/test/api_info.test.ts @@ -238,6 +238,7 @@ describe("handle_message", () => { const result = handle_message(data, last_status); expect(result).toEqual({ type: "update", + original_msg: "process_starts", status: { queue: true, stage: "pending", diff --git a/client/js/src/test/init_helpers.test.ts b/client/js/src/test/init_helpers.test.ts index 2462dd4ea2691..055ad077363a5 100644 --- a/client/js/src/test/init_helpers.test.ts +++ b/client/js/src/test/init_helpers.test.ts @@ -84,16 +84,6 @@ describe("determine_protocol", () => { host: "lite.local" }); }); - - it('should return the default protocols and host when the endpoint does not start with "http" or "file"', () => { - const endpoint = "huggingface.co"; - const result = determine_protocol(endpoint); - expect(result).toEqual({ - ws_protocol: "wss", - http_protocol: "https:", - host: "huggingface.co" - }); - }); }); describe("parse_and_set_cookies", () => { diff --git a/client/js/src/types.ts b/client/js/src/types.ts index b929e96cb16dc..ac4f2b54721db 100644 --- a/client/js/src/types.ts +++ b/client/js/src/types.ts @@ -102,6 +102,7 @@ export type client_return = { export interface SubmitIterable extends AsyncIterable { [Symbol.asyncIterator](): AsyncIterator; cancel: () => Promise; + event_id: () => string; } export type PredictReturn = { @@ -182,6 +183,9 @@ export interface Config { max_file_size?: number; theme_hash?: number; username: string | null; + api_prefix?: string; + fill_height?: boolean; + fill_width?: boolean; } // todo: DRY up types @@ -251,6 +255,11 @@ export interface Dependency { show_api: boolean; zerogpu?: boolean; rendered_in: 
number | null; + connection: "stream" | "sse"; + time_limit: number; + stream_every: number; + like_user_message: boolean; + event_specific_args: string[]; } export interface DependencyTypes { @@ -335,7 +344,7 @@ export interface Status { queue: boolean; code?: string; success?: boolean; - stage: "pending" | "error" | "complete" | "generating"; + stage: "pending" | "error" | "complete" | "generating" | "streaming"; duration?: number; visible?: boolean; broken?: boolean; @@ -352,12 +361,14 @@ export interface Status { }[]; time?: Date; changed_state_ids?: number[]; + time_limit?: number; } export interface StatusMessage extends Status { type: "status"; endpoint: string; fn_index: number; + original_msg?: string; } export interface PayloadMessage extends Payload { diff --git a/client/js/src/upload.ts b/client/js/src/upload.ts index 0dfe714f7da6a..175093a2c40f8 100644 --- a/client/js/src/upload.ts +++ b/client/js/src/upload.ts @@ -1,4 +1,3 @@ -import type { UploadResponse } from "./types"; import type { Client } from "./client"; export async function upload( @@ -34,7 +33,7 @@ export async function upload( const file = new FileData({ ...file_data[i], path: f, - url: root_url + "/file=" + f + url: `${root_url}${this.api_prefix}/file=${f}` }); return file; }); @@ -73,6 +72,7 @@ export class FileData { is_stream?: boolean; mime_type?: string; alt_text?: string; + b64?: string; readonly meta = { _type: "gradio.FileData" }; constructor({ @@ -83,7 +83,8 @@ export class FileData { blob, is_stream, mime_type, - alt_text + alt_text, + b64 }: { path: string; url?: string; @@ -93,6 +94,7 @@ export class FileData { is_stream?: boolean; mime_type?: string; alt_text?: string; + b64?: string; }) { this.path = path; this.url = url; @@ -102,5 +104,6 @@ export class FileData { this.is_stream = is_stream; this.mime_type = mime_type; this.alt_text = alt_text; + this.b64 = b64; } } diff --git a/client/js/src/utils/stream.ts b/client/js/src/utils/stream.ts index 7ae061cc3aea1..edfe5e555e8af 
100644 --- a/client/js/src/utils/stream.ts +++ b/client/js/src/utils/stream.ts @@ -1,4 +1,4 @@ -import { BROKEN_CONNECTION_MSG } from "../constants"; +import { BROKEN_CONNECTION_MSG, SSE_URL } from "../constants"; import type { Client } from "../client"; import { stream } from "fetch-event-stream"; @@ -25,7 +25,7 @@ export async function open_stream(this: Client): Promise { session_hash: this.session_hash }).toString(); - let url = new URL(`${config.root}/queue/data?${params}`); + let url = new URL(`${config.root}${this.api_prefix}/${SSE_URL}?${params}`); if (jwt) { url.searchParams.set("__sign", jwt); diff --git a/client/js/src/utils/submit.ts b/client/js/src/utils/submit.ts index ee3277dcfb03b..f5863b478e45f 100644 --- a/client/js/src/utils/submit.ts +++ b/client/js/src/utils/submit.ts @@ -19,7 +19,14 @@ import { process_endpoint } from "../helpers/api_info"; import semiver from "semiver"; -import { BROKEN_CONNECTION_MSG, QUEUE_FULL_MSG } from "../constants"; +import { + BROKEN_CONNECTION_MSG, + QUEUE_FULL_MSG, + SSE_URL, + SSE_DATA_URL, + RESET_URL, + CANCEL_URL +} from "../constants"; import { apply_diff_stream, close_stream } from "./stream"; import { Client } from "../client"; @@ -46,7 +53,8 @@ export function submit( event_callbacks, unclosed_events, post_data, - options + options, + api_prefix } = this; const that = this; @@ -66,6 +74,8 @@ export function submit( let websocket: WebSocket; let stream: EventSource | null; let protocol = config.protocol ?? "ws"; + let event_id_final = ""; + let event_id_cb: () => string = () => event_id_final; const _endpoint = typeof endpoint === "number" ? 
"/predict" : endpoint; let payload: Payload; @@ -131,14 +141,14 @@ export function submit( } if ("event_id" in cancel_request) { - await fetch(`${config.root}/cancel`, { + await fetch(`${config.root}${api_prefix}/${CANCEL_URL}`, { headers: { "Content-Type": "application/json" }, method: "POST", body: JSON.stringify(cancel_request) }); } - await fetch(`${config.root}/reset`, { + await fetch(`${config.root}${api_prefix}/${RESET_URL}`, { headers: { "Content-Type": "application/json" }, method: "POST", body: JSON.stringify(reset_request) @@ -205,7 +215,7 @@ export function submit( }); post_data( - `${config.root}/run${ + `${config.root}${api_prefix}/run${ _endpoint.startsWith("/") ? _endpoint : `/${_endpoint}` }${url_params ? "?" + url_params : ""}`, { @@ -411,7 +421,7 @@ export function submit( session_hash: session_hash }).toString(); let url = new URL( - `${config.root}/queue/join?${ + `${config.root}${api_prefix}/${SSE_URL}?${ url_params ? url_params + "&" : "" }${params}` ); @@ -449,12 +459,14 @@ export function submit( close(); } } else if (type === "data") { - event_id = _data.event_id as string; - let [_, status] = await post_data(`${config.root}/queue/data`, { - ...payload, - session_hash, - event_id - }); + let [_, status] = await post_data( + `${config.root}${api_prefix}/queue/data`, + { + ...payload, + session_hash, + event_id + } + ); if (status !== 200) { fire_event({ type: "status", @@ -480,7 +492,7 @@ export function submit( visible: data.visible, fn_index }); - } else if (type === "generating") { + } else if (type === "generating" || type === "streaming") { fire_event({ type: "status", time: new Date(), @@ -563,7 +575,7 @@ export function submit( : Promise.resolve(null); const post_data_promise = zerogpu_auth_promise.then((headers) => { return post_data( - `${config.root}/queue/join?${url_params}`, + `${config.root}${api_prefix}/${SSE_DATA_URL}?${url_params}`, { ...payload, session_hash @@ -594,9 +606,10 @@ export function submit( }); } else { event_id 
= response.event_id as string; + event_id_final = event_id; let callback = async function (_data: object): Promise { try { - const { type, status, data } = handle_message( + const { type, status, data, original_msg } = handle_message( _data, last_status[fn_index] ); @@ -612,6 +625,7 @@ export function submit( endpoint: _endpoint, fn_index, time: new Date(), + original_msg: original_msg, ...status }); } else if (type === "complete") { @@ -639,7 +653,7 @@ export function submit( fn_index }); return; - } else if (type === "generating") { + } else if (type === "generating" || type === "streaming") { fire_event({ type: "status", time: new Date(), @@ -651,6 +665,7 @@ export function submit( }); if ( data && + dependency.connection !== "stream" && ["sse_v2", "sse_v2.1", "sse_v3"].includes(protocol) ) { apply_diff_stream(pending_diff_streams, event_id!, data); @@ -790,7 +805,8 @@ export function submit( close(); return next(); }, - cancel + cancel, + event_id: event_id_cb }; return iterator; diff --git a/client/js/src/utils/upload_files.ts b/client/js/src/utils/upload_files.ts index db2f55729d933..6babe212bd494 100644 --- a/client/js/src/utils/upload_files.ts +++ b/client/js/src/utils/upload_files.ts @@ -27,8 +27,8 @@ export async function upload_files( }); try { const upload_url = upload_id - ? `${root_url}/${UPLOAD_URL}?upload_id=${upload_id}` - : `${root_url}/${UPLOAD_URL}`; + ? 
`${root_url}${this.api_prefix}/${UPLOAD_URL}?upload_id=${upload_id}` + : `${root_url}${this.api_prefix}/${UPLOAD_URL}`; response = await this.fetch(upload_url, { method: "POST", diff --git a/client/js/src/utils/view_api.ts b/client/js/src/utils/view_api.ts index 1bfcdfdc201b4..52268bd745e3b 100644 --- a/client/js/src/utils/view_api.ts +++ b/client/js/src/utils/view_api.ts @@ -41,7 +41,7 @@ export async function view_api(this: Client): Promise { credentials: "include" }); } else { - const url = join_urls(config.root, API_INFO_URL); + const url = join_urls(config.root, this.api_prefix, API_INFO_URL); response = await this.fetch(url, { headers, credentials: "include" diff --git a/client/python/CHANGELOG.md b/client/python/CHANGELOG.md index 7aa7b80266792..d4c36b3e12e00 100644 --- a/client/python/CHANGELOG.md +++ b/client/python/CHANGELOG.md @@ -1,5 +1,44 @@ # gradio_client +## 1.4.0-beta.5 + +### Features + +- [#9589](https://github.com/gradio-app/gradio/pull/9589) [`477f45c`](https://github.com/gradio-app/gradio/commit/477f45cb43be957684eb392e3d62c09490c22391) - Only move files to the cache that have a meta key. Thanks @freddyaboulton! + +## 1.4.0-beta.4 + +### Features + +- [#9550](https://github.com/gradio-app/gradio/pull/9550) [`b0fedd7`](https://github.com/gradio-app/gradio/commit/b0fedd7ef718c0df797ec277db7e773543a70a4d) - Fix most flaky Python tests in `5.0-dev` branch. Thanks @abidlabs! +- [#9483](https://github.com/gradio-app/gradio/pull/9483) [`8dc7c12`](https://github.com/gradio-app/gradio/commit/8dc7c12389311b60efcde1b9d3e3668a34d2dc00) - Send Streaming data over Websocket if possible. Also support base64 output format for images. Thanks @freddyaboulton! +- [#9522](https://github.com/gradio-app/gradio/pull/9522) [`3b71ed2`](https://github.com/gradio-app/gradio/commit/3b71ed21b7e2ecb67eb68fb946d25565169cb4df) - Api info fix. Thanks @freddyaboulton! 
+ +## 1.4.0-beta.3 + +### Fixes + +- [#9431](https://github.com/gradio-app/gradio/pull/9431) [`7065e11`](https://github.com/gradio-app/gradio/commit/7065e11e465fcdfe14688bd6ca2aeed0a25fcc36) - Check for `file_types` parameter in the backend. Thanks @dawoodkhan82! + +## 1.4.0-beta.2 + +### Features + +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Ssr part 2. Thanks @pngwn! + +## 1.4.0-beta.1 + +### Features + +- [#9200](https://github.com/gradio-app/gradio/pull/9200) [`2e179d3`](https://github.com/gradio-app/gradio/commit/2e179d35be6ed60a5a6bfc7303178d63e41781ad) - prefix api routes. Thanks @pngwn! + +## 1.4.0-beta.0 + +### Features + +- [#9140](https://github.com/gradio-app/gradio/pull/9140) [`c054ec8`](https://github.com/gradio-app/gradio/commit/c054ec85e49ab102b15afd305583ee394151d16c) - Drop python 3.8 and 3.9. Thanks @abidlabs! +- [#8941](https://github.com/gradio-app/gradio/pull/8941) [`97a7bf6`](https://github.com/gradio-app/gradio/commit/97a7bf66a79179d1b91a3199d68e5c11216ca500) - Streaming inputs for 5.0. Thanks @freddyaboulton! + ## 1.3.0 ### Features diff --git a/client/python/README.md b/client/python/README.md index 0482b30d16211..07049e8c23da1 100644 --- a/client/python/README.md +++ b/client/python/README.md @@ -25,7 +25,7 @@ The Gradio client works with any Gradio Space, whether it be an image generator, If you already have a recent version of `gradio`, then the `gradio_client` is included as a dependency. 
-Otherwise, the lightweight `gradio_client` package can be installed from pip (or pip3) and works with Python versions 3.8 or higher: +Otherwise, the lightweight `gradio_client` package can be installed from pip (or pip3) and works with Python versions 3.10 or higher: ```bash $ pip install gradio_client diff --git a/client/python/gradio_client/CHANGELOG.md b/client/python/gradio_client/CHANGELOG.md index 7aa7b80266792..d4c36b3e12e00 100644 --- a/client/python/gradio_client/CHANGELOG.md +++ b/client/python/gradio_client/CHANGELOG.md @@ -1,5 +1,44 @@ # gradio_client +## 1.4.0-beta.5 + +### Features + +- [#9589](https://github.com/gradio-app/gradio/pull/9589) [`477f45c`](https://github.com/gradio-app/gradio/commit/477f45cb43be957684eb392e3d62c09490c22391) - Only move files to the cache that have a meta key. Thanks @freddyaboulton! + +## 1.4.0-beta.4 + +### Features + +- [#9550](https://github.com/gradio-app/gradio/pull/9550) [`b0fedd7`](https://github.com/gradio-app/gradio/commit/b0fedd7ef718c0df797ec277db7e773543a70a4d) - Fix most flaky Python tests in `5.0-dev` branch. Thanks @abidlabs! +- [#9483](https://github.com/gradio-app/gradio/pull/9483) [`8dc7c12`](https://github.com/gradio-app/gradio/commit/8dc7c12389311b60efcde1b9d3e3668a34d2dc00) - Send Streaming data over Websocket if possible. Also support base64 output format for images. Thanks @freddyaboulton! +- [#9522](https://github.com/gradio-app/gradio/pull/9522) [`3b71ed2`](https://github.com/gradio-app/gradio/commit/3b71ed21b7e2ecb67eb68fb946d25565169cb4df) - Api info fix. Thanks @freddyaboulton! + +## 1.4.0-beta.3 + +### Fixes + +- [#9431](https://github.com/gradio-app/gradio/pull/9431) [`7065e11`](https://github.com/gradio-app/gradio/commit/7065e11e465fcdfe14688bd6ca2aeed0a25fcc36) - Check for `file_types` parameter in the backend. Thanks @dawoodkhan82! 
+ +## 1.4.0-beta.2 + +### Features + +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Ssr part 2. Thanks @pngwn! + +## 1.4.0-beta.1 + +### Features + +- [#9200](https://github.com/gradio-app/gradio/pull/9200) [`2e179d3`](https://github.com/gradio-app/gradio/commit/2e179d35be6ed60a5a6bfc7303178d63e41781ad) - prefix api routes. Thanks @pngwn! + +## 1.4.0-beta.0 + +### Features + +- [#9140](https://github.com/gradio-app/gradio/pull/9140) [`c054ec8`](https://github.com/gradio-app/gradio/commit/c054ec85e49ab102b15afd305583ee394151d16c) - Drop python 3.8 and 3.9. Thanks @abidlabs! +- [#8941](https://github.com/gradio-app/gradio/pull/8941) [`97a7bf6`](https://github.com/gradio-app/gradio/commit/97a7bf66a79179d1b91a3199d68e5c11216ca500) - Streaming inputs for 5.0. Thanks @freddyaboulton! + ## 1.3.0 ### Features diff --git a/client/python/gradio_client/cli/deploy_discord.py b/client/python/gradio_client/cli/deploy_discord.py index 9a828a32a0655..e653007428a6b 100644 --- a/client/python/gradio_client/cli/deploy_discord.py +++ b/client/python/gradio_client/cli/deploy_discord.py @@ -1,7 +1,6 @@ -from typing import List, Optional +from typing import Annotated, Optional from typer import Option -from typing_extensions import Annotated from gradio_client import Client @@ -17,7 +16,7 @@ def main( str, Option(help="Discord bot token. 
Get one on the discord website.") ] = None, api_names: Annotated[ - List[str], Option(help="Api names to turn into discord bots") + list[str], Option(help="Api names to turn into discord bots") ] = None, to_id: Annotated[ Optional[str], Option(help="Name of the space used to host the discord bot") diff --git a/client/python/gradio_client/client.py b/client/python/gradio_client/client.py index c6dc3eff3f3f4..157d4184cb2c1 100644 --- a/client/python/gradio_client/client.py +++ b/client/python/gradio_client/client.py @@ -16,13 +16,14 @@ import urllib.parse import uuid import warnings +from collections.abc import Callable from concurrent.futures import Future from dataclasses import dataclass from datetime import datetime from functools import partial from pathlib import Path from threading import Lock -from typing import Any, Callable, Literal +from typing import Any, Literal import httpx import huggingface_hub @@ -75,7 +76,7 @@ class Client: def __init__( self, src: str, - hf_token: str | None = None, + hf_token: str | Literal[False] | None = False, max_workers: int = 40, verbose: bool = True, auth: tuple[str, str] | None = None, @@ -88,14 +89,14 @@ def __init__( ): """ Parameters: - src: Either the name of the Hugging Face Space to load, (e.g. "abidlabs/whisper-large-v2") or the full URL (including "http" or "https") of the hosted Gradio app to load (e.g. "http://mydomain.com/app" or "https://bec81a83-5b5c-471e.gradio.live/"). - hf_token: The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token - max_workers: The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously. - verbose: Whether the client should print statements to the console. - headers: Additional headers to send to the remote Gradio app on every request. By default only the HF authorization and user-agent headers are sent. 
This parameter will override the default headers if they have the same keys. - download_files: Directory where the client should download output files on the local machine from the remote API. By default, uses the value of the GRADIO_TEMP_DIR environment variable which, if not set by the user, is a temporary directory on your machine. If False, the client does not download files and returns a FileData dataclass object with the filepath on the remote machine instead. - ssl_verify: If False, skips certificate validation which allows the client to connect to Gradio apps that are using self-signed certificates. - httpx_kwargs: Additional keyword arguments to pass to `httpx.Client`, `httpx.stream`, `httpx.get` and `httpx.post`. This can be used to set timeouts, proxies, http auth, etc. + src: either the name of the Hugging Face Space to load, (e.g. "abidlabs/whisper-large-v2") or the full URL (including "http" or "https") of the hosted Gradio app to load (e.g. "http://mydomain.com/app" or "https://bec81a83-5b5c-471e.gradio.live/"). + hf_token: optional Hugging Face token to use to access private Spaces. By default, no token is sent to the server. Set `hf_token=None` to use the locally saved token if there is one (warning: only provide a token if you are loading a trusted private Space as the token can be read by the Space you are loading). Find your tokens here: https://huggingface.co/settings/tokens. + max_workers: maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously. + verbose: whether the client should print statements to the console. + headers: additional headers to send to the remote Gradio app on every request. By default only the HF authorization and user-agent headers are sent. This parameter will override the default headers if they have the same keys. + download_files: directory where the client should download output files on the local machine from the remote API. 
By default, uses the value of the GRADIO_TEMP_DIR environment variable which, if not set by the user, is a temporary directory on your machine. If False, the client does not download files and returns a FileData dataclass object with the filepath on the remote machine instead. + ssl_verify: if False, skips certificate validation which allows the client to connect to Gradio apps that are using self-signed certificates. + httpx_kwargs: additional keyword arguments to pass to `httpx.Client`, `httpx.stream`, `httpx.get` and `httpx.post`. This can be used to set timeouts, proxies, http auth, etc. """ self.verbose = verbose self.hf_token = hf_token @@ -153,20 +154,27 @@ def __init__( self.protocol: Literal["ws", "sse", "sse_v1", "sse_v2", "sse_v2.1"] = ( self.config.get("protocol", "ws") ) - self.api_url = urllib.parse.urljoin(self.src, utils.API_URL) + api_prefix: str = self.config.get("api_prefix", "") + self.api_prefix = api_prefix.lstrip("/") + "/" + self.src_prefixed = urllib.parse.urljoin(self.src, api_prefix).rstrip("/") + "/" + + self.api_url = urllib.parse.urljoin(self.src_prefixed, utils.API_URL) self.sse_url = urllib.parse.urljoin( - self.src, utils.SSE_URL_V0 if self.protocol == "sse" else utils.SSE_URL + self.src_prefixed, + utils.SSE_URL_V0 if self.protocol == "sse" else utils.SSE_URL, + ) + self.heartbeat_url = urllib.parse.urljoin( + self.src_prefixed, utils.HEARTBEAT_URL ) - self.heartbeat_url = urllib.parse.urljoin(self.src, utils.HEARTBEAT_URL) self.sse_data_url = urllib.parse.urljoin( - self.src, + self.src_prefixed, utils.SSE_DATA_URL_V0 if self.protocol == "sse" else utils.SSE_DATA_URL, ) self.ws_url = urllib.parse.urljoin( - self.src.replace("http", "ws", 1), utils.WS_URL + self.src_prefixed.replace("http", "ws", 1), utils.WS_URL ) - self.upload_url = urllib.parse.urljoin(self.src, utils.UPLOAD_URL) - self.reset_url = urllib.parse.urljoin(self.src, utils.RESET_URL) + self.upload_url = urllib.parse.urljoin(self.src_prefixed, utils.UPLOAD_URL) + 
self.reset_url = urllib.parse.urljoin(self.src_prefixed, utils.RESET_URL) self.app_version = version.parse(self.config.get("version", "2.0")) self._info = self._get_api_info() self.session_hash = str(uuid.uuid4()) @@ -322,7 +330,7 @@ def duplicate( cls, from_id: str, to_id: str | None = None, - hf_token: str | None = None, + hf_token: str | Literal[False] | None = False, private: bool = True, hardware: Literal[ "cpu-basic", @@ -355,7 +363,7 @@ Parameters: from_id: The name of the Hugging Face Space to duplicate in the format "{username}/{space_id}", e.g. "gradio/whisper". to_id: The name of the new Hugging Face Space to create, e.g. "abidlabs/whisper-duplicate". If not provided, the new Space will be named "{your_HF_username}/{space_id}". - hf_token: The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token + hf_token: optional Hugging Face token to use when duplicating private Spaces. By default, no token is sent to the server. Set `hf_token=None` to use the locally saved token if there is one. Find your tokens here: https://huggingface.co/settings/tokens. private: Whether the new Space should be private (True) or public (False). Defaults to True. hardware: The hardware tier to use for the new Space. Defaults to the same hardware tier as the original Space. Options include "cpu-basic", "cpu-upgrade", "t4-small", "t4-medium", "a10g-small", "a10g-large", "a100-large", subject to availability. secrets: A dictionary of (secret key, secret value) to pass to the new Space. Defaults to None. Secrets are only used when the Space is duplicated for the first time, and are not updated if the duplicated Space already exists.
@@ -551,7 +559,7 @@ def fn(future): return job def _get_api_info(self): - api_info_url = urllib.parse.urljoin(self.src, utils.RAW_API_INFO_URL) + api_info_url = urllib.parse.urljoin(self.src_prefixed, utils.RAW_API_INFO_URL) if self.app_version > version.Version("3.36.1"): r = httpx.get( api_info_url, @@ -863,7 +871,7 @@ def _get_config(self) -> dict: ) else: # to support older versions of Gradio r = httpx.get( - self.src, + self.src_prefixed, headers=self.headers, cookies=self.cookies, verify=self.ssl_verify, @@ -890,7 +898,7 @@ def deploy_discord( discord_bot_token: str | None = None, api_names: list[str | tuple[str, str]] | None = None, to_id: str | None = None, - hf_token: str | None = None, + hf_token: str | Literal[False] | None = False, private: bool = False, ): """ @@ -1069,8 +1077,7 @@ def __init__( self._get_component_type(id_) for id_ in dependency["outputs"] ] self.parameters_info = self._get_parameters_info() - - self.root_url = client.src + "/" if not client.src.endswith("/") else client.src + self.root_url = self.client.src_prefixed # Disallow hitting endpoints that the Gradio app has disabled self.is_valid = self.api_name is not False @@ -1138,7 +1145,7 @@ def make_cancel( if helper is None: return if self.client.app_version > version.Version("4.29.0"): - url = urllib.parse.urljoin(self.client.src, utils.CANCEL_URL) + url = urllib.parse.urljoin(self.client.src_prefixed, utils.CANCEL_URL) # The event_id won't be set on the helper until later # so need to create the data in a function that's run at cancel time @@ -1303,7 +1310,11 @@ def download_files(self, *data) -> tuple: def remove_skipped_components(self, *data) -> tuple: """""" - data = [d for d, oct in zip(data, self.output_component_types) if not oct.skip] + data = [ + d + for d, oct in zip(data, self.output_component_types, strict=False) + if not oct.skip + ] return tuple(data) def reduce_singleton_output(self, *data) -> Any: @@ -1355,9 +1366,8 @@ def _upload_file(self, f: dict, data_index: 
int) -> dict[str, str]: # use the suffix of the original name to determine format to save it to in cache. return { "path": file_path, - "orig_name": utils.strip_invalid_filename_characters(orig_name.name) - if orig_name.suffix - else None, + "orig_name": utils.strip_invalid_filename_characters(orig_name.name), + "meta": {"_type": "gradio.FileData"} if orig_name.suffix else None, } def _download_file(self, x: dict) -> str: diff --git a/client/python/gradio_client/compatibility.py b/client/python/gradio_client/compatibility.py index ca5f323b74a6d..8348cf0c191a8 100644 --- a/client/python/gradio_client/compatibility.py +++ b/client/python/gradio_client/compatibility.py @@ -172,7 +172,7 @@ def _upload( "orig_name": Path(f).name, "data": None, } - for f, o in zip(fs, output) + for f, o in zip(fs, output, strict=False) ] else: o = next(o for ix, o in enumerate(result) if indices[ix] == i) @@ -207,7 +207,7 @@ def insert_state(self, *data) -> tuple: def remove_skipped_components(self, *data) -> tuple: data = [ d - for d, oct in zip(data, self.output_component_types) + for d, oct in zip(data, self.output_component_types, strict=False) if oct not in utils.SKIP_COMPONENTS ] return tuple(data) @@ -235,13 +235,15 @@ def serialize(self, *data) -> tuple: files = [ f - for f, t in zip(data, self.input_component_types) + for f, t in zip(data, self.input_component_types, strict=False) if t in ["file", "uploadbutton"] ] uploaded_files = self._upload(files) data = list(data) self._add_uploaded_files_to_data(uploaded_files, data) - o = tuple([s.serialize(d) for s, d in zip(self.serializers, data)]) + o = tuple( + [s.serialize(d) for s, d in zip(self.serializers, data, strict=False)] + ) return o def deserialize(self, *data) -> tuple: @@ -257,7 +259,7 @@ def deserialize(self, *data) -> tuple: hf_token=self.client.hf_token, root_url=self.root_url, ) - for s, d in zip(self.deserializers, data) + for s, d in zip(self.deserializers, data, strict=False) ] ) return outputs diff --git 
a/client/python/gradio_client/documentation.py b/client/python/gradio_client/documentation.py index 741ad78abb88f..a3ab65493002e 100644 --- a/client/python/gradio_client/documentation.py +++ b/client/python/gradio_client/documentation.py @@ -6,8 +6,8 @@ import inspect import warnings from collections import defaultdict +from collections.abc import Callable from functools import lru_cache -from typing import Callable classes_to_document = defaultdict(list) classes_inherit_documentation = {} diff --git a/client/python/gradio_client/package.json b/client/python/gradio_client/package.json index 3fc305b8bb50d..929a15e03804a 100644 --- a/client/python/gradio_client/package.json +++ b/client/python/gradio_client/package.json @@ -1,6 +1,6 @@ { "name": "gradio_client", - "version": "1.3.0", + "version": "1.4.0-beta.5", "description": "", "python": "true", "main_changeset": true, diff --git a/client/python/gradio_client/utils.py b/client/python/gradio_client/utils.py index 8b3a4bb8c013f..e49648afe7d8d 100644 --- a/client/python/gradio_client/utils.py +++ b/client/python/gradio_client/utils.py @@ -13,12 +13,13 @@ import tempfile import time import warnings +from collections.abc import Callable, Coroutine from dataclasses import dataclass, field from datetime import datetime from enum import Enum from pathlib import Path from threading import Lock -from typing import TYPE_CHECKING, Any, Callable, Coroutine, Literal, Optional, TypedDict +from typing import TYPE_CHECKING, Any, Literal, Optional, TypedDict import fsspec.asyn import httpx @@ -122,6 +123,7 @@ class ServerMessage(str, Enum): server_stopped = "Server stopped unexpectedly." 
unexpected_error = "unexpected_error" close_stream = "close_stream" + process_streaming = "process_streaming" class Status(Enum): @@ -685,6 +687,23 @@ def get_extension(encoding: str) -> str | None: return extension +def is_valid_file(file_path: str, file_types: list[str]) -> bool: + mime_type = get_mimetype(file_path) + if mime_type is None: + return False + for file_type in file_types: + if file_type == "file": + return True + if file_type.startswith("."): + file_type = file_type.lstrip(".").lower() + mime_type_split = mime_type.lower().split("/") + if file_type == mime_type_split[1]: + return True + elif mime_type.startswith(f"{file_type}/"): + return True + return False + + def encode_file_to_base64(f: str | Path): with open(f, "rb") as file: encoded_string = base64.b64encode(file.read()) @@ -881,6 +900,7 @@ def get_type(schema: dict): FILE_DATA_FORMATS = [ + "Dict(path: str | None (Path to a local file), url: str | None (Publicly available url or base64 encoded image), size: int | None (Size of image in bytes), orig_name: str | None (Original filename), mime_type: str | None (mime type of image), is_stream: bool (Can always be set to False), meta: Dict())", "Dict(path: str, url: str | None, size: int | None, orig_name: str | None, mime_type: str | None)", "Dict(path: str, url: str | None, size: int | None, orig_name: str | None, mime_type: str | None, is_stream: bool)", "Dict(path: str, url: str | None, size: int | None, orig_name: str | None, mime_type: str | None, is_stream: bool, meta: Dict())", diff --git a/client/python/pyproject.toml b/client/python/pyproject.toml index 54778eb45e78c..c7dcf0846f28d 100644 --- a/client/python/pyproject.toml +++ b/client/python/pyproject.toml @@ -7,7 +7,7 @@ name = "gradio_client" dynamic = ["version", "dependencies", "readme"] description = "Python library for easily interacting with trained machine learning models" license = "Apache-2.0" -requires-python = ">=3.8" +requires-python = ">=3.10" authors = [ { name = 
"Abubakar Abid", email = "gradio-team@huggingface.co" }, { name = "Ali Abid", email = "gradio-team@huggingface.co" }, diff --git a/client/python/test/conftest.py b/client/python/test/conftest.py index a46ea99c54857..b6a867b5e146f 100644 --- a/client/python/test/conftest.py +++ b/client/python/test/conftest.py @@ -255,12 +255,10 @@ def show(n): num = gr.Number(value=10) with gr.Row(): count_btn = gr.Button("Count") - count_forever = gr.Button("Count forever") with gr.Column(): out = gr.Textbox() count_btn.click(count, num, out, api_name="count") - count_forever.click(show, num, out, api_name="count_forever", every=3) return demo diff --git a/client/python/test/test_client.py b/client/python/test/test_client.py index c827041e3aecc..c64216b3689a6 100644 --- a/client/python/test/test_client.py +++ b/client/python/test/test_client.py @@ -1,7 +1,6 @@ from __future__ import annotations import json -import os import pathlib import tempfile import time @@ -16,15 +15,11 @@ import httpx import huggingface_hub import pytest -import uvicorn -from fastapi import FastAPI -from gradio.http_server import Server -from huggingface_hub import HfFolder from huggingface_hub.utils import RepositoryNotFoundError from gradio_client import Client, handle_file from gradio_client.client import DEFAULT_TEMP_DIR -from gradio_client.exceptions import AppError, AuthenticationError +from gradio_client.exceptions import AuthenticationError from gradio_client.utils import ( Communicator, ProgressUnit, @@ -33,7 +28,7 @@ StatusUpdate, ) -HF_TOKEN = os.getenv("HF_TOKEN") or HfFolder.get_token() +HF_TOKEN = huggingface_hub.get_token() @contextmanager @@ -133,7 +128,7 @@ def test_private_space(self): space_id = "gradio-tests/not-actually-private-space" api = huggingface_hub.HfApi() assert api.space_info(space_id).private - client = Client(space_id) + client = Client(space_id, hf_token=HF_TOKEN) output = client.predict("abc", api_name="/predict") assert output == "abc" @@ -144,6 +139,7 @@ def 
test_private_space_v4(self): assert api.space_info(space_id).private client = Client( space_id, + hf_token=HF_TOKEN, ) output = client.predict("abc", api_name="/predict") assert output == "abc" @@ -155,6 +151,7 @@ def test_private_space_v4_sse_v1(self): assert api.space_info(space_id).private client = Client( space_id, + hf_token=HF_TOKEN, ) output = client.predict("abc", api_name="/predict") assert output == "abc" @@ -562,36 +559,36 @@ def test_does_not_upload_dir(self, stateful_chatbot): ret = client.predict(message, initial_history, api_name="/submit") assert ret == ("", [["", None], ["Hello", "I love you"]]) - def test_can_call_mounted_app_via_api(self): - def greet(name): - return "Hello " + name + "!" - - gradio_app = gr.Interface( - fn=greet, - inputs=gr.Textbox(lines=2, placeholder="Name Here..."), - outputs="text", - ) - - app = FastAPI() - app = gr.mount_gradio_app(app, gradio_app, path="/test/gradio") - config = uvicorn.Config( - app=app, - port=8000, - log_level="info", - ) - server = Server(config=config) - # Using the gradio Server class to not have - # to implement code again to run uvicorn in a separate thread - # However, that means we need to set this flag to prevent - # run_in_thread_from_blocking - server.started = True - try: - server.run_in_thread() - time.sleep(1) - client = Client("http://127.0.0.1:8000/test/gradio/") - assert client.predict("freddy") == "Hello freddy!" - finally: - server.thread.join(timeout=1) + # def test_can_call_mounted_app_via_api(self): + # def greet(name): + # return "Hello " + name + "!" 
+ + # gradio_app = gr.Interface( + # fn=greet, + # inputs=gr.Textbox(lines=2, placeholder="Name Here..."), + # outputs="text", + # ) + + # app = FastAPI() + # app = gr.mount_gradio_app(app, gradio_app, path="/test/gradio") + # config = uvicorn.Config( + # app=app, + # port=8000, + # log_level="info", + # ) + # server = Server(config=config) + # # Using the gradio Server class to not have + # # to implement code again to run uvicorn in a separate thread + # # However, that means we need to set this flag to prevent + # # run_in_thread_from_blocking + # server.started = True + # try: + # server.run_in_thread() + # time.sleep(1) + # client = Client("http://127.0.0.1:8000/test/gradio/") + # assert client.predict("freddy") == "Hello freddy!" + # finally: + # server.thread.join(timeout=1) @pytest.mark.flaky def test_predict_with_space_with_api_name_false(self): @@ -1020,6 +1017,7 @@ def test_state_does_not_appear(self, state_demo): def test_private_space(self): client = Client( "gradio-tests/not-actually-private-space", + hf_token=HF_TOKEN, ) assert len(client.endpoints) == 3 assert len([e for e in client.endpoints.values() if e.is_valid]) == 2 @@ -1132,29 +1130,18 @@ def test_file_io(self, file_io_demo): outputs = info["named_endpoints"]["/predict"]["returns"] assert inputs[0]["type"]["type"] == "array" - assert inputs[0]["python_type"] == { - "type": "List[filepath]", - "description": "", - } + assert inputs[0]["python_type"]["type"] == "List[filepath]" + assert isinstance(inputs[0]["example_input"], list) assert isinstance(inputs[0]["example_input"][0], dict) - assert inputs[1]["python_type"] == { - "type": "filepath", - "description": "", - } + assert inputs[1]["python_type"]["type"] == "filepath" assert isinstance(inputs[1]["example_input"], dict) - assert outputs[0]["python_type"] == { - "type": "List[filepath]", - "description": "", - } + assert outputs[0]["python_type"]["type"] == "List[filepath]" assert outputs[0]["type"]["type"] == "array" - assert 
outputs[1]["python_type"] == { - "type": "filepath", - "description": "", - } + assert outputs[1]["python_type"]["type"] == "filepath" def test_layout_components_in_output(self, hello_world_with_group): with connect(hello_world_with_group) as client: @@ -1255,6 +1242,7 @@ class TestEndpoints: def test_upload(self): client = Client( src="gradio-tests/not-actually-private-file-upload", + hf_token=HF_TOKEN, ) response = MagicMock(status_code=200) response.json.return_value = [ @@ -1398,21 +1386,6 @@ def test_add_secrets(self, mock_time, mock_init, mock_duplicate, mock_add_secret ) -def test_upstream_exceptions(count_generator_demo_exception): - with connect(count_generator_demo_exception, show_error=True) as client: - with pytest.raises( - AppError, match="The upstream Gradio app has raised an exception: Oh no!" - ): - client.predict(7, api_name="/count") - - with connect(count_generator_demo_exception) as client: - with pytest.raises( - AppError, - match="The upstream Gradio app has raised an exception but has not enabled verbose error reporting.", - ): - client.predict(7, api_name="/count") - - def test_httpx_kwargs(increment_demo): with connect( increment_demo, client_kwargs={"httpx_kwargs": {"timeout": 5}} diff --git a/client/python/test/test_utils.py b/client/python/test/test_utils.py index c8f3fd6967cc4..68272611cfc1a 100644 --- a/client/python/test/test_utils.py +++ b/client/python/test/test_utils.py @@ -71,6 +71,21 @@ def test_decode_base64_to_file(): assert isinstance(temp_file, tempfile._TemporaryFileWrapper) +@pytest.mark.parametrize( + "path_or_url, file_types, expected_result", + [ + ("/home/user/documents/example.pdf", [".json", "text", ".mp3", ".pdf"], True), + ("C:\\Users\\user\\documents\\example.png", [".png"], True), + ("C:\\Users\\user\\documents\\example.png", ["image"], True), + ("C:\\Users\\user\\documents\\example.png", ["file"], True), + ("/home/user/documents/example.pdf", [".json", "text", ".mp3"], False), + 
("https://example.com/avatar/xxxx.mp4", ["audio", ".png", ".jpg"], False), + ], +) +def test_is_valid_file_type(path_or_url, file_types, expected_result): + assert utils.is_valid_file(path_or_url, file_types) is expected_result + + @pytest.mark.parametrize( "orig_filename, new_filename", [ diff --git a/demo/all_demos/run.py b/demo/all_demos/run.py index 11ba2b45209e6..4544255288b67 100644 --- a/demo/all_demos/run.py +++ b/demo/all_demos/run.py @@ -19,11 +19,20 @@ names = sorted(os.listdir("./demos")) + @app.get("/") def index(request: Request): names = [[p[0], p[2]] for p in all_demos] - return templates.TemplateResponse("index.html", {"request": request, "names": names, - "initial_demo": names[0][0], "is_space": get_space()}) + return templates.TemplateResponse( + "index.html", + { + "request": request, + "names": names, + "initial_demo": names[0][0], + "is_space": get_space(), + }, + ) + all_demos = [] demo_module = None diff --git a/demo/annotatedimage_component/requirements.txt b/demo/annotatedimage_component/requirements.txt new file mode 100644 index 0000000000000..6dd520b61558d --- /dev/null +++ b/demo/annotatedimage_component/requirements.txt @@ -0,0 +1,3 @@ +numpy +requests +Pillow diff --git a/demo/annotatedimage_component/run.ipynb b/demo/annotatedimage_component/run.ipynb index 3095d3d05ab09..9f03b026ac61e 100644 --- a/demo/annotatedimage_component/run.ipynb +++ b/demo/annotatedimage_component/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: annotatedimage_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pathlib\n", "from PIL import Image\n", "import 
numpy as np\n", "import urllib.request\n", "\n", "source_dir = pathlib.Path(__file__).parent\n", "\n", "urllib.request.urlretrieve(\n", " 'https://gradio-builds.s3.amazonaws.com/demo-files/base.png',\n", " str(source_dir / \"base.png\")\n", ")\n", "urllib.request.urlretrieve(\n", " \"https://gradio-builds.s3.amazonaws.com/demo-files/buildings.png\",\n", " str(source_dir / \"buildings.png\")\n", ")\n", "\n", "base_image = Image.open(str(source_dir / \"base.png\"))\n", "building_image = Image.open(str(source_dir / \"buildings.png\"))\n", "\n", "# Create segmentation mask\n", "building_image = np.asarray(building_image)[:, :, -1] > 0\n", "\n", "with gr.Blocks() as demo:\n", " gr.AnnotatedImage(\n", " value=(base_image, [(building_image, \"buildings\")]),\n", " height=500,\n", " )\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: annotatedimage_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy requests Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np \n", "import requests \n", "from io import BytesIO\n", "from PIL import Image\n", "\n", "base_image = \"https://gradio-docs-json.s3.us-west-2.amazonaws.com/base.png\"\n", "building_image = requests.get(\"https://gradio-docs-json.s3.us-west-2.amazonaws.com/buildings.png\")\n", "building_image = np.asarray(Image.open(BytesIO(building_image.content)))[:, :, -1] > 0\n", "\n", "with gr.Blocks() as demo:\n", " gr.AnnotatedImage(\n", " value=(base_image, [(building_image, \"buildings\")]),\n", " height=500,\n", " )\n", "\n", "demo.launch()"]}], "metadata": {}, 
"nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/annotatedimage_component/run.py b/demo/annotatedimage_component/run.py index 517d1f4c62642..ad3a8784be397 100644 --- a/demo/annotatedimage_component/run.py +++ b/demo/annotatedimage_component/run.py @@ -1,25 +1,12 @@ import gradio as gr -import pathlib +import numpy as np +import requests +from io import BytesIO from PIL import Image -import numpy as np -import urllib.request -source_dir = pathlib.Path(__file__).parent - -urllib.request.urlretrieve( - 'https://gradio-builds.s3.amazonaws.com/demo-files/base.png', - str(source_dir / "base.png") -) -urllib.request.urlretrieve( - "https://gradio-builds.s3.amazonaws.com/demo-files/buildings.png", - str(source_dir / "buildings.png") -) - -base_image = Image.open(str(source_dir / "base.png")) -building_image = Image.open(str(source_dir / "buildings.png")) - -# Create segmentation mask -building_image = np.asarray(building_image)[:, :, -1] > 0 +base_image = "https://gradio-docs-json.s3.us-west-2.amazonaws.com/base.png" +building_image = requests.get("https://gradio-docs-json.s3.us-west-2.amazonaws.com/buildings.png") +building_image = np.asarray(Image.open(BytesIO(building_image.content)))[:, :, -1] > 0 with gr.Blocks() as demo: gr.AnnotatedImage( @@ -27,4 +14,4 @@ height=500, ) -demo.launch() +demo.launch() \ No newline at end of file diff --git a/demo/bar_plot_demo/requirements.txt b/demo/bar_plot_demo/requirements.txt new file mode 100644 index 0000000000000..fb6c7ed7ec60d --- /dev/null +++ b/demo/bar_plot_demo/requirements.txt @@ -0,0 +1 @@ +pandas diff --git a/demo/bar_plot_demo/run.ipynb b/demo/bar_plot_demo/run.ipynb index 0131a915ea050..9449c062a2ec5 100644 --- a/demo/bar_plot_demo/run.ipynb +++ b/demo/bar_plot_demo/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: bar_plot_demo"]}, {"cell_type": "code", "execution_count": null, "id": 
"272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "from random import randint, random\n", "import gradio as gr\n", "\n", "\n", "temp_sensor_data = pd.DataFrame(\n", " {\n", " \"time\": pd.date_range(\"2021-01-01\", end=\"2021-01-05\", periods=200),\n", " \"temperature\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"humidity\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"location\": [\"indoor\", \"outdoor\"] * 100,\n", " }\n", ")\n", "\n", "food_rating_data = pd.DataFrame(\n", " {\n", " \"cuisine\": [[\"Italian\", \"Mexican\", \"Chinese\"][i % 3] for i in range(100)],\n", " \"rating\": [random() * 4 + 0.5 * (i % 3) for i in range(100)],\n", " \"price\": [randint(10, 50) + 4 * (i % 3) for i in range(100)],\n", " \"wait\": [random() for i in range(100)],\n", " }\n", ")\n", "\n", "with gr.Blocks() as bar_plots:\n", " with gr.Row():\n", " start = gr.DateTime(\"2021-01-01 00:00:00\", label=\"Start\")\n", " end = gr.DateTime(\"2021-01-05 00:00:00\", label=\"End\")\n", " apply_btn = gr.Button(\"Apply\", scale=0)\n", " with gr.Row():\n", " group_by = gr.Radio([\"None\", \"30m\", \"1h\", \"4h\", \"1d\"], value=\"None\", label=\"Group by\")\n", " aggregate = gr.Radio([\"sum\", \"mean\", \"median\", \"min\", \"max\"], value=\"sum\", label=\"Aggregation\")\n", "\n", " temp_by_time = gr.BarPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " )\n", " temp_by_time_location = gr.BarPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " color=\"location\",\n", " )\n", "\n", " time_graphs = [temp_by_time, temp_by_time_location]\n", " group_by.change(\n", " lambda group: [gr.BarPlot(x_bin=None if group == \"None\" else group)] * 
len(time_graphs),\n", " group_by,\n", " time_graphs\n", " )\n", " aggregate.change(\n", " lambda aggregate: [gr.BarPlot(y_aggregate=aggregate)] * len(time_graphs),\n", " aggregate,\n", " time_graphs\n", " )\n", "\n", " def rescale(select: gr.SelectData):\n", " return select.index\n", " rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])\n", "\n", " for trigger in [apply_btn.click, rescale_evt.then]:\n", " trigger(\n", " lambda start, end: [gr.BarPlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs\n", " )\n", "\n", " with gr.Row():\n", " price_by_cuisine = gr.BarPlot(\n", " food_rating_data,\n", " x=\"cuisine\",\n", " y=\"price\",\n", " )\n", " with gr.Column(scale=0):\n", " gr.Button(\"Sort $ > $$$\").click(lambda: gr.BarPlot(sort=\"y\"), None, price_by_cuisine)\n", " gr.Button(\"Sort $$$ > $\").click(lambda: gr.BarPlot(sort=\"-y\"), None, price_by_cuisine)\n", " gr.Button(\"Sort A > Z\").click(lambda: gr.BarPlot(sort=[\"Chinese\", \"Italian\", \"Mexican\"]), None, price_by_cuisine)\n", "\n", " with gr.Row():\n", " price_by_rating = gr.BarPlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " x_bin=1,\n", " )\n", " price_by_rating_color = gr.BarPlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " color=\"cuisine\",\n", " x_bin=1,\n", " color_map={\"Italian\": \"red\", \"Mexican\": \"green\", \"Chinese\": \"blue\"},\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " bar_plots.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: bar_plot_demo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas "]}, {"cell_type": "code", "execution_count": null, "id": 
"288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "from random import randint, random\n", "import gradio as gr\n", "\n", "\n", "temp_sensor_data = pd.DataFrame(\n", " {\n", " \"time\": pd.date_range(\"2021-01-01\", end=\"2021-01-05\", periods=200),\n", " \"temperature\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"humidity\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"location\": [\"indoor\", \"outdoor\"] * 100,\n", " }\n", ")\n", "\n", "food_rating_data = pd.DataFrame(\n", " {\n", " \"cuisine\": [[\"Italian\", \"Mexican\", \"Chinese\"][i % 3] for i in range(100)],\n", " \"rating\": [random() * 4 + 0.5 * (i % 3) for i in range(100)],\n", " \"price\": [randint(10, 50) + 4 * (i % 3) for i in range(100)],\n", " \"wait\": [random() for i in range(100)],\n", " }\n", ")\n", "\n", "with gr.Blocks() as bar_plots:\n", " with gr.Row():\n", " start = gr.DateTime(\"2021-01-01 00:00:00\", label=\"Start\")\n", " end = gr.DateTime(\"2021-01-05 00:00:00\", label=\"End\")\n", " apply_btn = gr.Button(\"Apply\", scale=0)\n", " with gr.Row():\n", " group_by = gr.Radio([\"None\", \"30m\", \"1h\", \"4h\", \"1d\"], value=\"None\", label=\"Group by\")\n", " aggregate = gr.Radio([\"sum\", \"mean\", \"median\", \"min\", \"max\"], value=\"sum\", label=\"Aggregation\")\n", "\n", " temp_by_time = gr.BarPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " )\n", " temp_by_time_location = gr.BarPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " color=\"location\",\n", " )\n", "\n", " time_graphs = [temp_by_time, temp_by_time_location]\n", " group_by.change(\n", " lambda group: [gr.BarPlot(x_bin=None if group == \"None\" else group)] * len(time_graphs),\n", " group_by,\n", " time_graphs\n", " )\n", " aggregate.change(\n", " lambda aggregate: [gr.BarPlot(y_aggregate=aggregate)] * len(time_graphs),\n", " aggregate,\n", " 
time_graphs\n", " )\n", "\n", " def rescale(select: gr.SelectData):\n", " return select.index\n", " rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])\n", "\n", " for trigger in [apply_btn.click, rescale_evt.then]:\n", " trigger(\n", " lambda start, end: [gr.BarPlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs\n", " )\n", "\n", " with gr.Row():\n", " price_by_cuisine = gr.BarPlot(\n", " food_rating_data,\n", " x=\"cuisine\",\n", " y=\"price\",\n", " )\n", " with gr.Column(scale=0):\n", " gr.Button(\"Sort $ > $$$\").click(lambda: gr.BarPlot(sort=\"y\"), None, price_by_cuisine)\n", " gr.Button(\"Sort $$$ > $\").click(lambda: gr.BarPlot(sort=\"-y\"), None, price_by_cuisine)\n", " gr.Button(\"Sort A > Z\").click(lambda: gr.BarPlot(sort=[\"Chinese\", \"Italian\", \"Mexican\"]), None, price_by_cuisine)\n", "\n", " with gr.Row():\n", " price_by_rating = gr.BarPlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " x_bin=1,\n", " )\n", " price_by_rating_color = gr.BarPlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " color=\"cuisine\",\n", " x_bin=1,\n", " color_map={\"Italian\": \"red\", \"Mexican\": \"green\", \"Chinese\": \"blue\"},\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " bar_plots.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/barplot_component/requirements.txt b/demo/barplot_component/requirements.txt new file mode 100644 index 0000000000000..fb6c7ed7ec60d --- /dev/null +++ b/demo/barplot_component/requirements.txt @@ -0,0 +1 @@ +pandas diff --git a/demo/barplot_component/run.ipynb b/demo/barplot_component/run.ipynb index 309aef4a438eb..01a8109e0544d 100644 --- a/demo/barplot_component/run.ipynb +++ b/demo/barplot_component/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: 
barplot_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "\n", "simple = pd.DataFrame(\n", " {\n", " \"item\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n", " \"inventory\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n", " }\n", ")\n", "\n", "with gr.Blocks() as demo:\n", " gr.BarPlot(\n", " value=simple,\n", " x=\"item\",\n", " y=\"inventory\",\n", " title=\"Simple Bar Plot\",\n", " container=False,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: barplot_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "\n", "simple = pd.DataFrame(\n", " {\n", " \"item\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n", " \"inventory\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n", " }\n", ")\n", "\n", "with gr.Blocks() as demo:\n", " gr.BarPlot(\n", " value=simple,\n", " x=\"item\",\n", " y=\"inventory\",\n", " title=\"Simple Bar Plot\",\n", " container=False,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/blocks_essay/run.ipynb 
b/demo/blocks_essay/run.ipynb index 2d42d896083c1..8878ec76a8de8 100644 --- a/demo/blocks_essay/run.ipynb +++ b/demo/blocks_essay/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "countries_cities_dict = {\n", " \"USA\": [\"New York\", \"Los Angeles\", \"Chicago\"],\n", " \"Canada\": [\"Toronto\", \"Montreal\", \"Vancouver\"],\n", " \"Pakistan\": [\"Karachi\", \"Lahore\", \"Islamabad\"],\n", "}\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox(lines=2, visible=True), gr.Button(interactive=True)\n", " elif choice == \"long\":\n", " return gr.Textbox(lines=8, visible=True, value=\"Lorem ipsum dolor sit amet\"), gr.Button(interactive=True)\n", " else:\n", " return gr.Textbox(visible=False), gr.Button(interactive=False)\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)\n", "\n", " with gr.Row():\n", " num = gr.Number(minimum=0, maximum=100, label=\"input\")\n", " out = gr.Number(label=\"output\")\n", " minimum_slider = gr.Slider(0, 100, 0, label=\"min\")\n", " maximum_slider = gr.Slider(0, 100, 100, label=\"max\")\n", " submit_btn = gr.Button(\"Submit\", variant=\"primary\")\n", "\n", " with gr.Row():\n", " country = gr.Dropdown(list(countries_cities_dict.keys()), label=\"Country\")\n", " cities = gr.Dropdown([], label=\"Cities\")\n", "\n", " @country.change(inputs=country, outputs=cities)\n", " 
def update_cities(country):\n", " cities = list(countries_cities_dict[country])\n", " return gr.Dropdown(choices=cities, value=cities[0], interactive=True)\n", "\n", " def reset_bounds(minimum, maximum):\n", " return gr.Number(minimum=minimum, maximum=maximum)\n", "\n", " radio.change(fn=change_textbox, inputs=radio, outputs=[text, submit_btn])\n", " gr.on(\n", " [minimum_slider.change, maximum_slider.change],\n", " reset_bounds,\n", " [minimum_slider, maximum_slider],\n", " outputs=num,\n", " )\n", " num.submit(lambda x: x, num, out)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "countries_cities_dict = {\n", " \"USA\": [\"New York\", \"Los Angeles\", \"Chicago\"],\n", " \"Canada\": [\"Toronto\", \"Montreal\", \"Vancouver\"],\n", " \"Pakistan\": [\"Karachi\", \"Lahore\", \"Islamabad\"],\n", "}\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox(lines=2, visible=True), gr.Button(interactive=True)\n", " elif choice == \"long\":\n", " return gr.Textbox(lines=8, visible=True, value=\"Lorem ipsum dolor sit amet\"), gr.Button(interactive=True)\n", " else:\n", " return gr.Textbox(visible=False), gr.Button(interactive=False)\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True, 
show_copy_button=True)\n", "\n", " with gr.Row():\n", " num = gr.Number(minimum=0, maximum=100, label=\"input\")\n", " out = gr.Number(label=\"output\")\n", " minimum_slider = gr.Slider(0, 100, 0, label=\"min\")\n", " maximum_slider = gr.Slider(0, 100, 100, label=\"max\")\n", " submit_btn = gr.Button(\"Submit\", variant=\"primary\")\n", "\n", " with gr.Row():\n", " country = gr.Dropdown(list(countries_cities_dict.keys()), label=\"Country\")\n", " cities = gr.Dropdown([], label=\"Cities\")\n", " @country.change(inputs=country, outputs=cities)\n", " def update_cities(country):\n", " cities = list(countries_cities_dict[country])\n", " return gr.Dropdown(choices=cities, value=cities[0], interactive=True)\n", "\n", " def reset_bounds(minimum, maximum):\n", " return gr.Number(minimum=minimum, maximum=maximum)\n", "\n", " radio.change(fn=change_textbox, inputs=radio, outputs=[text, submit_btn])\n", " gr.on(\n", " [minimum_slider.change, maximum_slider.change],\n", " reset_bounds,\n", " [minimum_slider, maximum_slider],\n", " outputs=num,\n", " )\n", " num.submit(lambda x: x, num, out)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/blocks_essay/run.py b/demo/blocks_essay/run.py index b30933094cbbd..f06f10a88f7fd 100644 --- a/demo/blocks_essay/run.py +++ b/demo/blocks_essay/run.py @@ -30,7 +30,6 @@ def change_textbox(choice): with gr.Row(): country = gr.Dropdown(list(countries_cities_dict.keys()), label="Country") cities = gr.Dropdown([], label="Cities") - @country.change(inputs=country, outputs=cities) def update_cities(country): cities = list(countries_cities_dict[country]) diff --git a/demo/blocks_flipper/requirements.txt b/demo/blocks_flipper/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/blocks_flipper/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/blocks_flipper/run.ipynb 
b/demo/blocks_flipper/run.ipynb index b8ca13a3ce607..124837422e228 100644 --- a/demo/blocks_flipper/run.ipynb +++ b/demo/blocks_flipper/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_flipper"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def flip_text(x):\n", " return x[::-1]\n", "\n", "def flip_image(x):\n", " return np.fliplr(x)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"Flip text or image files using this demo.\")\n", " with gr.Tab(\"Flip Text\"):\n", " text_input = gr.Textbox()\n", " text_output = gr.Textbox()\n", " text_button = gr.Button(\"Flip\")\n", " with gr.Tab(\"Flip Image\"):\n", " with gr.Row():\n", " image_input = gr.Image()\n", " image_output = gr.Image()\n", " image_button = gr.Button(\"Flip\")\n", "\n", " with gr.Accordion(\"Open for More!\", open=False):\n", " gr.Markdown(\"Look at me...\")\n", " temp_slider = gr.Slider(\n", " 0, 1,\n", " value=0.1,\n", " step=0.1,\n", " interactive=True,\n", " label=\"Slide me\",\n", " )\n", "\n", " text_button.click(flip_text, inputs=text_input, outputs=text_output)\n", " image_button.click(flip_image, inputs=image_input, outputs=image_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_flipper"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip 
install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def flip_text(x):\n", " return x[::-1]\n", "\n", "def flip_image(x):\n", " return np.fliplr(x)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"Flip text or image files using this demo.\")\n", " with gr.Tab(\"Flip Text\"):\n", " text_input = gr.Textbox()\n", " text_output = gr.Textbox()\n", " text_button = gr.Button(\"Flip\")\n", " with gr.Tab(\"Flip Image\"):\n", " with gr.Row():\n", " image_input = gr.Image()\n", " image_output = gr.Image()\n", " image_button = gr.Button(\"Flip\")\n", "\n", " with gr.Accordion(\"Open for More!\", open=False):\n", " gr.Markdown(\"Look at me...\")\n", " temp_slider = gr.Slider(\n", " 0, 1,\n", " value=0.1,\n", " step=0.1,\n", " interactive=True,\n", " label=\"Slide me\",\n", " )\n", "\n", " text_button.click(flip_text, inputs=text_input, outputs=text_output)\n", " image_button.click(flip_image, inputs=image_input, outputs=image_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/blocks_kinematics/requirements.txt b/demo/blocks_kinematics/requirements.txt new file mode 100644 index 0000000000000..5da331cf67f41 --- /dev/null +++ b/demo/blocks_kinematics/requirements.txt @@ -0,0 +1,2 @@ +numpy +pandas diff --git a/demo/blocks_kinematics/run.ipynb b/demo/blocks_kinematics/run.ipynb index fd1818ea3357d..75bc851477c64 100644 --- a/demo/blocks_kinematics/run.ipynb +++ b/demo/blocks_kinematics/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_kinematics"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], 
"source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "def plot(v, a):\n", " g = 9.81\n", " theta = a / 180 * 3.14\n", " tmax = ((2 * v) * np.sin(theta)) / g\n", " timemat = tmax * np.linspace(0, 1, 40)\n", "\n", " x = (v * timemat) * np.cos(theta)\n", " y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n", " df = pd.DataFrame({\"x\": x, \"y\": y})\n", " return df\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " gr.Markdown(\n", " r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n", " )\n", "\n", " with gr.Row():\n", " speed = gr.Slider(1, 30, 25, label=\"Speed\")\n", " angle = gr.Slider(0, 90, 45, label=\"Angle\")\n", " output = gr.LinePlot(\n", " x=\"x\",\n", " y=\"y\",\n", " overlay_point=True,\n", " tooltip=[\"x\", \"y\"],\n", " x_lim=[0, 100],\n", " y_lim=[0, 60],\n", " width=350,\n", " height=300,\n", " )\n", " btn = gr.Button(value=\"Run\")\n", " btn.click(plot, [speed, angle], output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_kinematics"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "def plot(v, a):\n", " g = 9.81\n", " theta = a / 180 
* 3.14\n", " tmax = ((2 * v) * np.sin(theta)) / g\n", " timemat = tmax * np.linspace(0, 1, 40)\n", "\n", " x = (v * timemat) * np.cos(theta)\n", " y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n", " df = pd.DataFrame({\"x\": x, \"y\": y})\n", " return df\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " gr.Markdown(\n", " r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n", " )\n", "\n", " with gr.Row():\n", " speed = gr.Slider(1, 30, 25, label=\"Speed\")\n", " angle = gr.Slider(0, 90, 45, label=\"Angle\")\n", " output = gr.LinePlot(\n", " x=\"x\",\n", " y=\"y\",\n", " overlay_point=True,\n", " tooltip=[\"x\", \"y\"],\n", " x_lim=[0, 100],\n", " y_lim=[0, 60],\n", " width=350,\n", " height=300,\n", " )\n", " btn = gr.Button(value=\"Run\")\n", " btn.click(plot, [speed, angle], output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/blocks_multiple_event_triggers/requirements.txt b/demo/blocks_multiple_event_triggers/requirements.txt index 5cb63bcc8c366..bbe82b5897e5e 100644 --- a/demo/blocks_multiple_event_triggers/requirements.txt +++ b/demo/blocks_multiple_event_triggers/requirements.txt @@ -1,2 +1,3 @@ plotly -pypistats \ No newline at end of file +pypistats +python-dateutil diff --git a/demo/blocks_multiple_event_triggers/run.ipynb b/demo/blocks_multiple_event_triggers/run.ipynb index daded4de20238..ec9efcc3599a9 100644 --- a/demo/blocks_multiple_event_triggers/run.ipynb +++ b/demo/blocks_multiple_event_triggers/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_multiple_event_triggers"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], 
"source": ["!pip install -q gradio plotly pypistats"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pypistats\n", "from datetime import date\n", "from dateutil.relativedelta import relativedelta\n", "import pandas as pd\n", "\n", "def get_plot(lib, time):\n", " data = pypistats.overall(lib, total=True, format=\"pandas\")\n", " data = data.groupby(\"category\").get_group(\"with_mirrors\").sort_values(\"date\")\n", " start_date = date.today() - relativedelta(months=int(time.split(\" \")[0]))\n", " data = data[(data['date'] > str(start_date))]\n", " data.date = pd.to_datetime(pd.to_datetime(data.date))\n", " return gr.LinePlot(value=data, x=\"date\", y=\"downloads\",\n", " tooltip=['date', 'downloads'],\n", " title=f\"Pypi downloads of {lib} over last {time}\",\n", " overlay_point=True,\n", " height=400,\n", " width=900)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " ## Pypi Download Stats \ud83d\udcc8\n", " See live download stats for all of Hugging Face's open-source libraries \ud83e\udd17\n", " \"\"\")\n", " with gr.Row():\n", " lib = gr.Dropdown([\"transformers\", \"datasets\", \"huggingface-hub\", \"gradio\", \"accelerate\"],\n", " value=\"gradio\", label=\"Library\")\n", " time = gr.Dropdown([\"3 months\", \"6 months\", \"9 months\", \"12 months\"],\n", " value=\"3 months\", label=\"Downloads over the last...\")\n", "\n", " plt = gr.LinePlot()\n", " # You can add multiple event triggers in 2 lines like this\n", " for event in [lib.change, time.change, demo.load]:\n", " event(get_plot, [lib, time], [plt])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_multiple_event_triggers"]}, 
{"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly pypistats python-dateutil "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pypistats\n", "from datetime import date\n", "from dateutil.relativedelta import relativedelta\n", "import pandas as pd\n", "\n", "def get_plot(lib, time):\n", " data = pypistats.overall(lib, total=True, format=\"pandas\")\n", " data = data.groupby(\"category\").get_group(\"with_mirrors\").sort_values(\"date\")\n", " start_date = date.today() - relativedelta(months=int(time.split(\" \")[0]))\n", " data = data[(data['date'] > str(start_date))]\n", " data.date = pd.to_datetime(pd.to_datetime(data.date))\n", " return gr.LinePlot(value=data, x=\"date\", y=\"downloads\",\n", " tooltip=['date', 'downloads'],\n", " title=f\"Pypi downloads of {lib} over last {time}\",\n", " overlay_point=True,\n", " height=400,\n", " width=900)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " ## Pypi Download Stats \ud83d\udcc8\n", " See live download stats for all of Hugging Face's open-source libraries \ud83e\udd17\n", " \"\"\")\n", " with gr.Row():\n", " lib = gr.Dropdown([\"transformers\", \"datasets\", \"huggingface-hub\", \"gradio\", \"accelerate\"],\n", " value=\"gradio\", label=\"Library\")\n", " time = gr.Dropdown([\"3 months\", \"6 months\", \"9 months\", \"12 months\"],\n", " value=\"3 months\", label=\"Downloads over the last...\")\n", "\n", " plt = gr.LinePlot()\n", " # You can add multiple event triggers in 2 lines like this\n", " for event in [lib.change, time.change, demo.load]:\n", " event(get_plot, [lib, time], [plt])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git 
a/demo/button_component/run.ipynb b/demo/button_component/run.ipynb index 438e15668df96..81f0218f5bc0b 100644 --- a/demo/button_component/run.ipynb +++ b/demo/button_component/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: button_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " gr.Button()\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: button_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "icon = \"https://cdn.icon-icons.com/icons2/2620/PNG/512/among_us_player_red_icon_156942.png\"\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " gr.Button(variant=\"primary\")\n", " gr.Button(variant=\"secondary\")\n", " gr.Button(variant=\"stop\")\n", " with gr.Row():\n", " gr.Button(variant=\"primary\", size=\"sm\")\n", " gr.Button(variant=\"secondary\", size=\"sm\")\n", " gr.Button(variant=\"stop\", size=\"sm\")\n", " with gr.Row():\n", " gr.Button(variant=\"primary\", icon=icon)\n", " gr.Button(variant=\"secondary\", icon=icon)\n", " gr.Button(variant=\"stop\", icon=icon)\n", "\n", " with gr.Row():\n", " gr.Button(variant=\"primary\", size=\"sm\", 
icon=icon)\n", " gr.Button(variant=\"secondary\", size=\"sm\", icon=icon)\n", " gr.Button(variant=\"stop\", size=\"sm\", icon=icon)\n", "\n", " with gr.Row():\n", " gr.Button(variant=\"primary\", icon=icon, interactive=False)\n", " gr.Button(variant=\"secondary\", icon=icon, interactive=False)\n", " gr.Button(variant=\"stop\", icon=icon, interactive=False)\n", "\n", " with gr.Row():\n", " gr.Button(variant=\"primary\", size=\"sm\", icon=icon, interactive=False)\n", " gr.Button(variant=\"secondary\", size=\"sm\", icon=icon, interactive=False)\n", " gr.Button(variant=\"stop\", size=\"sm\", icon=icon, interactive=False)\n", "\n", " with gr.Row():\n", " gr.Button(variant=\"primary\", interactive=False)\n", " gr.Button(variant=\"secondary\", interactive=False)\n", " gr.Button(variant=\"stop\", interactive=False)\n", "\n", " with gr.Row():\n", " gr.Button(variant=\"primary\", size=\"sm\", interactive=False)\n", " gr.Button(variant=\"secondary\", size=\"sm\", interactive=False)\n", " gr.Button(variant=\"stop\", size=\"sm\", interactive=False)\n", "\n", " with gr.Group():\n", " gr.Button(variant=\"primary\")\n", " gr.Button(variant=\"primary\")\n", " gr.Button(variant=\"secondary\")\n", " gr.Button(variant=\"secondary\")\n", " gr.Button(variant=\"stop\")\n", " gr.Button(variant=\"stop\")\n", "\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/button_component/run.py b/demo/button_component/run.py index 9119c5c351d0c..88b0d2215517f 100644 --- a/demo/button_component/run.py +++ b/demo/button_component/run.py @@ -1,6 +1,52 @@ import gradio as gr +icon = "https://cdn.icon-icons.com/icons2/2620/PNG/512/among_us_player_red_icon_156942.png" with gr.Blocks() as demo: - gr.Button() + with gr.Row(): + gr.Button(variant="primary") + gr.Button(variant="secondary") + gr.Button(variant="stop") + with gr.Row(): + gr.Button(variant="primary", size="sm") + gr.Button(variant="secondary", size="sm") + 
gr.Button(variant="stop", size="sm") + with gr.Row(): + gr.Button(variant="primary", icon=icon) + gr.Button(variant="secondary", icon=icon) + gr.Button(variant="stop", icon=icon) + + with gr.Row(): + gr.Button(variant="primary", size="sm", icon=icon) + gr.Button(variant="secondary", size="sm", icon=icon) + gr.Button(variant="stop", size="sm", icon=icon) + + with gr.Row(): + gr.Button(variant="primary", icon=icon, interactive=False) + gr.Button(variant="secondary", icon=icon, interactive=False) + gr.Button(variant="stop", icon=icon, interactive=False) + + with gr.Row(): + gr.Button(variant="primary", size="sm", icon=icon, interactive=False) + gr.Button(variant="secondary", size="sm", icon=icon, interactive=False) + gr.Button(variant="stop", size="sm", icon=icon, interactive=False) + + with gr.Row(): + gr.Button(variant="primary", interactive=False) + gr.Button(variant="secondary", interactive=False) + gr.Button(variant="stop", interactive=False) + + with gr.Row(): + gr.Button(variant="primary", size="sm", interactive=False) + gr.Button(variant="secondary", size="sm", interactive=False) + gr.Button(variant="stop", size="sm", interactive=False) + + with gr.Group(): + gr.Button(variant="primary") + gr.Button(variant="primary") + gr.Button(variant="secondary") + gr.Button(variant="secondary") + gr.Button(variant="stop") + gr.Button(variant="stop") + demo.launch() diff --git a/demo/chatbot_core_components/requirements.txt b/demo/chatbot_core_components/requirements.txt new file mode 100644 index 0000000000000..2f15c45be7df6 --- /dev/null +++ b/demo/chatbot_core_components/requirements.txt @@ -0,0 +1,4 @@ +plotly +numpy +pandas +matplotlib diff --git a/demo/chatbot_core_components/run.ipynb b/demo/chatbot_core_components/run.ipynb index fdb3c13228677..89be6e1d5b74b 100644 --- a/demo/chatbot_core_components/run.ipynb +++ b/demo/chatbot_core_components/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": 
{}, "source": ["# Gradio Demo: chatbot_core_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/audio.wav https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/audio.wav\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/avatar.png\n", "!wget -q -O files/sample.txt https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/sample.txt\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import plotly.express as px\n", "import random\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "txt = \"\"\"\n", "Absolutely! The mycorrhizal network, often referred to as the \"Wood Wide Web,\" is a symbiotic association between fungi and the roots of most plant species. Here\u2019s a deeper dive into how it works and its implications:\n", "\n", "### How It Works\n", "\n", "1. **Symbiosis**: Mycorrhizal fungi attach to plant roots, extending far into the soil. The plant provides the fungi with carbohydrates produced via photosynthesis. In return, the fungi help the plant absorb water and essential nutrients like phosphorus and nitrogen from the soil.\n", "\n", "2. 
**Network Formation**: The fungal hyphae (thread-like structures) connect individual plants, creating an extensive underground network. This network can link many plants together, sometimes spanning entire forests.\n", "\n", "3. **Communication**: Trees and plants use this network to communicate and share resources. For example, a tree under attack by pests can send chemical signals through the mycorrhizal network to warn neighboring trees. These trees can then produce defensive chemicals to prepare for the impending threat.\n", "\n", "### Benefits and Functions\n", "\n", "1. **Resource Sharing**: The network allows for the redistribution of resources among plants. For instance, a large, established tree might share excess nutrients and water with smaller, younger trees, promoting overall forest health.\n", "\n", "2. **Defense Mechanism**: The ability to share information about pests and diseases enhances the resilience of plant communities. This early warning system helps plants activate their defenses before they are directly affected.\n", "\n", "3. **Support for Seedlings**: Young seedlings, which have limited root systems, benefit immensely from the mycorrhizal network. They receive nutrients and water from larger plants, increasing their chances of survival and growth.\n", "\n", "### Ecological Impact\n", "\n", "1. **Biodiversity**: The mycorrhizal network supports biodiversity by fostering a cooperative environment. Plants of different species can coexist and thrive because of the shared resources and information.\n", "\n", "2. **Forest Health**: The network enhances the overall health of forests. By enabling efficient nutrient cycling and supporting plant defenses, it contributes to the stability and longevity of forest ecosystems.\n", "\n", "3. **Climate Change Mitigation**: Healthy forests act as significant carbon sinks, absorbing carbon dioxide from the atmosphere. 
The mycorrhizal network plays a critical role in maintaining forest health and, consequently, in mitigating climate change.\n", "\n", "### Research and Discoveries\n", "\n", "1. **Suzanne Simard's Work**: Ecologist Suzanne Simard\u2019s research has been pivotal in uncovering the complexities of the mycorrhizal network. She demonstrated that trees of different species can share resources and that \"mother trees\" (large, older trees) play a crucial role in nurturing younger plants.\n", "\n", "2. **Implications for Conservation**: Understanding the mycorrhizal network has significant implications for conservation efforts. It highlights the importance of preserving not just individual trees but entire ecosystems, including the fungal networks that sustain them.\n", "\n", "### Practical Applications\n", "\n", "1. **Agriculture**: Farmers and horticulturists are exploring the use of mycorrhizal fungi to improve crop yields and soil health. By incorporating these fungi into agricultural practices, they can reduce the need for chemical fertilizers and enhance plant resilience.\n", "\n", "2. **Reforestation**: In reforestation projects, introducing mycorrhizal fungi can accelerate the recovery of degraded lands. The fungi help establish healthy plant communities, ensuring the success of newly planted trees.\n", "\n", "The \"Wood Wide Web\" exemplifies the intricate and often hidden connections that sustain life on Earth. 
It\u2019s a reminder of the profound interdependence within natural systems and the importance of preserving these delicate relationships.\n", "\"\"\"\n", "\n", "def random_plot():\n", " df = px.data.iris()\n", " fig = px.scatter(\n", " df,\n", " x=\"sepal_width\",\n", " y=\"sepal_length\",\n", " color=\"species\",\n", " size=\"petal_length\",\n", " hover_data=[\"petal_width\"],\n", " )\n", " return fig\n", "\n", "color_map = {\n", " \"harmful\": \"crimson\",\n", " \"neutral\": \"gray\",\n", " \"beneficial\": \"green\",\n", "}\n", "\n", "def html_src(harm_level):\n", " return f\"\"\"\n", "
\n", "
\n", " {harm_level}\n", "
\n", "
\n", "\"\"\"\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def random_bokeh_plot():\n", " from bokeh.models import ColumnDataSource, Whisker\n", " from bokeh.plotting import figure\n", " from bokeh.sampledata.autompg2 import autompg2 as df\n", " from bokeh.transform import factor_cmap, jitter\n", "\n", " classes = sorted(df[\"class\"].unique())\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", "\n", "def random_matplotlib_plot():\n", " import numpy as np\n", " import pandas as pd\n", " import matplotlib.pyplot as plt\n", "\n", " countries = [\"USA\", \"Canada\", \"Mexico\", \"UK\"]\n", " months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n", " m = months.index(\"January\")\n", " r = 3.2\n", " start_day = 30 * m\n", " final_day = 30 * (m + 1)\n", " x = np.arange(start_day, final_day + 1)\n", " pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n", " df = pd.DataFrame({\"day\": x})\n", " for country in countries:\n", " df[country] = x ** (r) * (pop_count[country] + 1)\n", "\n", " fig = plt.figure()\n", " plt.plot(df[\"day\"], 
df[countries].to_numpy())\n", " plt.title(\"Outbreak in \" + \"January\")\n", " plt.ylabel(\"Cases\")\n", " plt.xlabel(\"Days since Day 0\")\n", " plt.legend(countries)\n", " return fig\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history, response_type):\n", " if response_type == \"plot\":\n", " history[-1][1] = gr.Plot(random_plot())\n", " elif response_type == \"bokeh_plot\":\n", " history[-1][1] = gr.Plot(random_bokeh_plot())\n", " elif response_type == \"matplotlib_plot\":\n", " history[-1][1] = gr.Plot(random_matplotlib_plot())\n", " elif response_type == \"gallery\":\n", " history[-1][1] = gr.Gallery(\n", " [os.path.join(\"files\", \"avatar.png\"), os.path.join(\"files\", \"avatar.png\")]\n", " )\n", " elif response_type == \"image\":\n", " history[-1][1] = gr.Image(os.path.join(\"files\", \"avatar.png\"))\n", " elif response_type == \"video\":\n", " history[-1][1] = gr.Video(os.path.join(\"files\", \"world.mp4\"))\n", " elif response_type == \"audio\":\n", " history[-1][1] = gr.Audio(os.path.join(\"files\", \"audio.wav\"))\n", " elif response_type == \"audio_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"audio.wav\"), \"description\")\n", " elif response_type == \"image_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"avatar.png\"), \"description\")\n", " elif response_type == \"video_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"world.mp4\"), \"description\")\n", " elif response_type == \"txt_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"sample.txt\"), \"description\")\n", " elif response_type == \"html\":\n", " history[-1][1] = gr.HTML(\n", " html_src(random.choice([\"harmful\", \"neutral\", \"beneficial\"]))\n", " )\n", " else:\n", " history[-1][1] = txt\n", " 
return history\n", "\n", "fig = random_plot()\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " scale=1,\n", " show_copy_button=True,\n", " avatar_images=(\n", " None, # os.path.join(\"files\", \"avatar.png\"),\n", " os.path.join(\"files\", \"avatar.png\"),\n", " ),\n", " # layout=\"panel\",\n", " )\n", " response_type = gr.Radio(\n", " [\n", " \"audio_file\",\n", " \"image_file\",\n", " \"video_file\",\n", " \"txt_file\",\n", " \"plot\",\n", " \"matplotlib_plot\",\n", " \"bokeh_plot\",\n", " \"image\",\n", " \"text\",\n", " \"gallery\",\n", " \"video\",\n", " \"audio\",\n", " \"html\",\n", " ],\n", " value=\"text\",\n", " label=\"Response Type\",\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(\n", " bot, [chatbot, response_type], chatbot, api_name=\"bot_response\"\n", " )\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_core_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly numpy pandas matplotlib "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", 
"os.mkdir('files')\n", "!wget -q -O files/audio.wav https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/audio.wav\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/avatar.png\n", "!wget -q -O files/sample.txt https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/sample.txt\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import plotly.express as px\n", "import random\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "txt = \"\"\"\n", "Absolutely! The mycorrhizal network, often referred to as the \"Wood Wide Web,\" is a symbiotic association between fungi and the roots of most plant species. Here\u2019s a deeper dive into how it works and its implications:\n", "\n", "### How It Works\n", "\n", "1. **Symbiosis**: Mycorrhizal fungi attach to plant roots, extending far into the soil. The plant provides the fungi with carbohydrates produced via photosynthesis. In return, the fungi help the plant absorb water and essential nutrients like phosphorus and nitrogen from the soil.\n", "\n", "2. **Network Formation**: The fungal hyphae (thread-like structures) connect individual plants, creating an extensive underground network. This network can link many plants together, sometimes spanning entire forests.\n", "\n", "3. **Communication**: Trees and plants use this network to communicate and share resources. For example, a tree under attack by pests can send chemical signals through the mycorrhizal network to warn neighboring trees. 
These trees can then produce defensive chemicals to prepare for the impending threat.\n", "\n", "### Benefits and Functions\n", "\n", "1. **Resource Sharing**: The network allows for the redistribution of resources among plants. For instance, a large, established tree might share excess nutrients and water with smaller, younger trees, promoting overall forest health.\n", "\n", "2. **Defense Mechanism**: The ability to share information about pests and diseases enhances the resilience of plant communities. This early warning system helps plants activate their defenses before they are directly affected.\n", "\n", "3. **Support for Seedlings**: Young seedlings, which have limited root systems, benefit immensely from the mycorrhizal network. They receive nutrients and water from larger plants, increasing their chances of survival and growth.\n", "\n", "### Ecological Impact\n", "\n", "1. **Biodiversity**: The mycorrhizal network supports biodiversity by fostering a cooperative environment. Plants of different species can coexist and thrive because of the shared resources and information.\n", "\n", "2. **Forest Health**: The network enhances the overall health of forests. By enabling efficient nutrient cycling and supporting plant defenses, it contributes to the stability and longevity of forest ecosystems.\n", "\n", "3. **Climate Change Mitigation**: Healthy forests act as significant carbon sinks, absorbing carbon dioxide from the atmosphere. The mycorrhizal network plays a critical role in maintaining forest health and, consequently, in mitigating climate change.\n", "\n", "### Research and Discoveries\n", "\n", "1. **Suzanne Simard's Work**: Ecologist Suzanne Simard\u2019s research has been pivotal in uncovering the complexities of the mycorrhizal network. She demonstrated that trees of different species can share resources and that \"mother trees\" (large, older trees) play a crucial role in nurturing younger plants.\n", "\n", "2. 
**Implications for Conservation**: Understanding the mycorrhizal network has significant implications for conservation efforts. It highlights the importance of preserving not just individual trees but entire ecosystems, including the fungal networks that sustain them.\n", "\n", "### Practical Applications\n", "\n", "1. **Agriculture**: Farmers and horticulturists are exploring the use of mycorrhizal fungi to improve crop yields and soil health. By incorporating these fungi into agricultural practices, they can reduce the need for chemical fertilizers and enhance plant resilience.\n", "\n", "2. **Reforestation**: In reforestation projects, introducing mycorrhizal fungi can accelerate the recovery of degraded lands. The fungi help establish healthy plant communities, ensuring the success of newly planted trees.\n", "\n", "The \"Wood Wide Web\" exemplifies the intricate and often hidden connections that sustain life on Earth. It\u2019s a reminder of the profound interdependence within natural systems and the importance of preserving these delicate relationships.\n", "\"\"\"\n", "\n", "def random_plot():\n", " df = px.data.iris()\n", " fig = px.scatter(\n", " df,\n", " x=\"sepal_width\",\n", " y=\"sepal_length\",\n", " color=\"species\",\n", " size=\"petal_length\",\n", " hover_data=[\"petal_width\"],\n", " )\n", " return fig\n", "\n", "color_map = {\n", " \"harmful\": \"crimson\",\n", " \"neutral\": \"gray\",\n", " \"beneficial\": \"green\",\n", "}\n", "\n", "def html_src(harm_level):\n", " return f\"\"\"\n", "
\n", "
\n", " {harm_level}\n", "
\n", "
\n", "\"\"\"\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def random_bokeh_plot():\n", " from bokeh.models import ColumnDataSource, Whisker\n", " from bokeh.plotting import figure\n", " from bokeh.sampledata.autompg2 import autompg2 as df\n", " from bokeh.transform import factor_cmap, jitter\n", "\n", " classes = sorted(df[\"class\"].unique())\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", "\n", "def random_matplotlib_plot():\n", " import numpy as np\n", " import pandas as pd\n", " import matplotlib.pyplot as plt\n", "\n", " countries = [\"USA\", \"Canada\", \"Mexico\", \"UK\"]\n", " months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n", " m = months.index(\"January\")\n", " r = 3.2\n", " start_day = 30 * m\n", " final_day = 30 * (m + 1)\n", " x = np.arange(start_day, final_day + 1)\n", " pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n", " df = pd.DataFrame({\"day\": x})\n", " for country in countries:\n", " df[country] = x ** (r) * (pop_count[country] + 1)\n", "\n", " fig = plt.figure()\n", " plt.plot(df[\"day\"], 
df[countries].to_numpy())\n", " plt.title(\"Outbreak in \" + \"January\")\n", " plt.ylabel(\"Cases\")\n", " plt.xlabel(\"Days since Day 0\")\n", " plt.legend(countries)\n", " return fig\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append({\"role\": \"user\", \"content\": {\"path\": x}})\n", " if message[\"text\"] is not None:\n", " history.append({\"role\": \"user\", \"content\": message[\"text\"]})\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history, response_type):\n", " msg = {\"role\": \"assistant\", \"content\": \"\"}\n", " if response_type == \"plot\":\n", " content = gr.Plot(random_plot())\n", " elif response_type == \"bokeh_plot\":\n", " content = gr.Plot(random_bokeh_plot())\n", " elif response_type == \"matplotlib_plot\":\n", " content = gr.Plot(random_matplotlib_plot())\n", " elif response_type == \"gallery\":\n", " content = gr.Gallery(\n", " [os.path.join(\"files\", \"avatar.png\"), os.path.join(\"files\", \"avatar.png\")]\n", " )\n", " elif response_type == \"image\":\n", " content = gr.Image(os.path.join(\"files\", \"avatar.png\"))\n", " elif response_type == \"video\":\n", " content = gr.Video(os.path.join(\"files\", \"world.mp4\"))\n", " elif response_type == \"audio\":\n", " content = gr.Audio(os.path.join(\"files\", \"audio.wav\"))\n", " elif response_type == \"audio_file\":\n", " content = {\"path\": os.path.join(\"files\", \"audio.wav\"), \"alt_text\": \"description\"}\n", " elif response_type == \"image_file\":\n", " content = {\"path\": os.path.join(\"files\", \"avatar.png\"), \"alt_text\": \"description\"}\n", " elif response_type == \"video_file\":\n", " content = {\"path\": os.path.join(\"files\", \"world.mp4\"), \"alt_text\": \"description\"}\n", " elif response_type == \"txt_file\":\n", " content = {\"path\": os.path.join(\"files\", \"sample.txt\"), \"alt_text\": \"description\"}\n", " elif response_type == \"html\":\n", " content = 
gr.HTML(\n", " html_src(random.choice([\"harmful\", \"neutral\", \"beneficial\"]))\n", " )\n", " else:\n", " content = txt\n", " msg[\"content\"] = content # type: ignore\n", " history.append(msg)\n", " return history\n", "\n", "fig = random_plot()\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " type=\"messages\",\n", " bubble_full_width=False,\n", " scale=1,\n", " show_copy_button=True,\n", " avatar_images=(\n", " None, # os.path.join(\"files\", \"avatar.png\"),\n", " os.path.join(\"files\", \"avatar.png\"),\n", " ),\n", " )\n", " response_type = gr.Radio(\n", " [\n", " \"audio_file\",\n", " \"image_file\",\n", " \"video_file\",\n", " \"txt_file\",\n", " \"plot\",\n", " \"matplotlib_plot\",\n", " \"bokeh_plot\",\n", " \"image\",\n", " \"text\",\n", " \"gallery\",\n", " \"video\",\n", " \"audio\",\n", " \"html\",\n", " ],\n", " value=\"text\",\n", " label=\"Response Type\",\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(\n", " bot, [chatbot, response_type], chatbot, api_name=\"bot_response\"\n", " )\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatbot_core_components/run.py b/demo/chatbot_core_components/run.py index ec72f404382a0..46cee130abd54 100644 --- a/demo/chatbot_core_components/run.py +++ b/demo/chatbot_core_components/run.py @@ -148,42 +148,45 @@ def random_matplotlib_plot(): def add_message(history, message): for x in message["files"]: - history.append(((x,), None)) + 
history.append({"role": "user", "content": {"path": x}}) if message["text"] is not None: - history.append((message["text"], None)) + history.append({"role": "user", "content": message["text"]}) return history, gr.MultimodalTextbox(value=None, interactive=False) def bot(history, response_type): + msg = {"role": "assistant", "content": ""} if response_type == "plot": - history[-1][1] = gr.Plot(random_plot()) + content = gr.Plot(random_plot()) elif response_type == "bokeh_plot": - history[-1][1] = gr.Plot(random_bokeh_plot()) + content = gr.Plot(random_bokeh_plot()) elif response_type == "matplotlib_plot": - history[-1][1] = gr.Plot(random_matplotlib_plot()) + content = gr.Plot(random_matplotlib_plot()) elif response_type == "gallery": - history[-1][1] = gr.Gallery( + content = gr.Gallery( [os.path.join("files", "avatar.png"), os.path.join("files", "avatar.png")] ) elif response_type == "image": - history[-1][1] = gr.Image(os.path.join("files", "avatar.png")) + content = gr.Image(os.path.join("files", "avatar.png")) elif response_type == "video": - history[-1][1] = gr.Video(os.path.join("files", "world.mp4")) + content = gr.Video(os.path.join("files", "world.mp4")) elif response_type == "audio": - history[-1][1] = gr.Audio(os.path.join("files", "audio.wav")) + content = gr.Audio(os.path.join("files", "audio.wav")) elif response_type == "audio_file": - history[-1][1] = (os.path.join("files", "audio.wav"), "description") + content = {"path": os.path.join("files", "audio.wav"), "alt_text": "description"} elif response_type == "image_file": - history[-1][1] = (os.path.join("files", "avatar.png"), "description") + content = {"path": os.path.join("files", "avatar.png"), "alt_text": "description"} elif response_type == "video_file": - history[-1][1] = (os.path.join("files", "world.mp4"), "description") + content = {"path": os.path.join("files", "world.mp4"), "alt_text": "description"} elif response_type == "txt_file": - history[-1][1] = (os.path.join("files", "sample.txt"), 
"description") + content = {"path": os.path.join("files", "sample.txt"), "alt_text": "description"} elif response_type == "html": - history[-1][1] = gr.HTML( + content = gr.HTML( html_src(random.choice(["harmful", "neutral", "beneficial"])) ) else: - history[-1][1] = txt + content = txt + msg["content"] = content # type: ignore + history.append(msg) return history fig = random_plot() @@ -191,6 +194,7 @@ def bot(history, response_type): with gr.Blocks(fill_height=True) as demo: chatbot = gr.Chatbot( elem_id="chatbot", + type="messages", bubble_full_width=False, scale=1, show_copy_button=True, @@ -198,7 +202,6 @@ def bot(history, response_type): None, # os.path.join("files", "avatar.png"), os.path.join("files", "avatar.png"), ), - # layout="panel", ) response_type = gr.Radio( [ diff --git a/demo/chatbot_multimodal/files/avatar.png b/demo/chatbot_examples/files/avatar.png similarity index 100% rename from demo/chatbot_multimodal/files/avatar.png rename to demo/chatbot_examples/files/avatar.png diff --git a/demo/chatbot_examples/files/cantina.wav b/demo/chatbot_examples/files/cantina.wav new file mode 100644 index 0000000000000..41f0204384682 Binary files /dev/null and b/demo/chatbot_examples/files/cantina.wav differ diff --git a/demo/chatbot_examples/run.ipynb b/demo/chatbot_examples/run.ipynb new file mode 100644 index 0000000000000..e1396d382da40 --- /dev/null +++ b/demo/chatbot_examples/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_examples"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O 
files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_examples/files/avatar.png\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/chatbot_examples/files/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "# Multimodal Chatbot demo that shows support for examples (example messages shown within the chatbot).\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def append_example_message(x: gr.SelectData, history):\n", " if x.value[\"text\"] is not None:\n", " history.append((x.value[\"text\"], None))\n", " if \"files\" in x.value:\n", " if isinstance(x.value[\"files\"], list):\n", " for file in x.value[\"files\"]:\n", " history.append((file, None))\n", " else:\n", " history.append((x.value[\"files\"], None))\n", " return history\n", "\n", "def respond(history):\n", " history[-1][1] = \"Cool!\"\n", " return history\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " scale=1,\n", " placeholder='

Welcome to Gradio!

',\n", " examples=[{\"icon\": os.path.join(os.path.abspath(''), \"files/avatar.png\"), \"display_text\": \"Display Text Here!\", \"text\": \"Try this example with this audio.\", \"files\": [os.path.join(os.path.abspath(''), \"files/cantina.wav\")]},\n", " {\"text\": \"Try this example with this image.\", \"files\": [os.path.join(os.path.abspath(''), \"files/avatar.png\")]},\n", " {\"text\": \"This is just text, no files!\"},\n", " {\"text\": \"Try this example with this image.\", \"files\": [os.path.join(os.path.abspath(''), \"files/avatar.png\"), os.path.join(os.path.abspath(''), \"files/avatar.png\")]},\n", " {\"text\": \"Try this example with this Audio.\", \"files\": [os.path.join(os.path.abspath(''), \"files/cantina.wav\")]}]\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(respond, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", " chatbot.example_select(append_example_message, [chatbot], [chatbot]).then(respond, chatbot, chatbot, api_name=\"respond\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatbot_examples/run.py b/demo/chatbot_examples/run.py new file mode 100644 index 0000000000000..a2c2c3f36e90c --- /dev/null +++ b/demo/chatbot_examples/run.py @@ -0,0 +1,55 @@ +import gradio as gr +import os +# Multimodal Chatbot demo that shows support for examples (example messages shown within the chatbot). 
+ +def print_like_dislike(x: gr.LikeData): + print(x.index, x.value, x.liked) + +def add_message(history, message): + for x in message["files"]: + history.append(((x,), None)) + if message["text"] is not None: + history.append((message["text"], None)) + return history, gr.MultimodalTextbox(value=None, interactive=False) + +def append_example_message(x: gr.SelectData, history): + if x.value["text"] is not None: + history.append((x.value["text"], None)) + if "files" in x.value: + if isinstance(x.value["files"], list): + for file in x.value["files"]: + history.append((file, None)) + else: + history.append((x.value["files"], None)) + return history + +def respond(history): + history[-1][1] = "Cool!" + return history + +with gr.Blocks(fill_height=True) as demo: + chatbot = gr.Chatbot( + elem_id="chatbot", + bubble_full_width=False, + scale=1, + placeholder='

Welcome to Gradio!

', + examples=[{"icon": os.path.join(os.path.dirname(__file__), "files/avatar.png"), "display_text": "Display Text Here!", "text": "Try this example with this audio.", "files": [os.path.join(os.path.dirname(__file__), "files/cantina.wav")]}, + {"text": "Try this example with this image.", "files": [os.path.join(os.path.dirname(__file__), "files/avatar.png")]}, + {"text": "This is just text, no files!"}, + {"text": "Try this example with this image.", "files": [os.path.join(os.path.dirname(__file__), "files/avatar.png"), os.path.join(os.path.dirname(__file__), "files/avatar.png")]}, + {"text": "Try this example with this Audio.", "files": [os.path.join(os.path.dirname(__file__), "files/cantina.wav")]}] + ) + + chat_input = gr.MultimodalTextbox(interactive=True, + file_count="multiple", + placeholder="Enter message or upload file...", show_label=False) + + chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input]) + bot_msg = chat_msg.then(respond, chatbot, chatbot, api_name="bot_response") + bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input]) + + chatbot.like(print_like_dislike, None, None) + chatbot.example_select(append_example_message, [chatbot], [chatbot]).then(respond, chatbot, chatbot, api_name="respond") + +if __name__ == "__main__": + demo.launch() diff --git a/demo/chatbot_multimodal/requirements.txt b/demo/chatbot_multimodal/requirements.txt deleted file mode 100644 index d42d0ad03bdf8..0000000000000 --- a/demo/chatbot_multimodal/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -plotly \ No newline at end of file diff --git a/demo/chatbot_multimodal/run.ipynb b/demo/chatbot_multimodal/run.ipynb index 53caf6d4463b5..0299e0799d9e9 100644 --- a/demo/chatbot_multimodal/run.ipynb +++ b/demo/chatbot_multimodal/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", 
"execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/messages_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import plotly.express as px\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "def random_plot():\n", " df = px.data.iris()\n", " fig = px.scatter(df, x=\"sepal_width\", y=\"sepal_length\", color=\"species\",\n", " size='petal_length', hover_data=['petal_width'])\n", " return fig\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history):\n", " history[-1][1] = \"Cool!\"\n", " return history\n", "\n", "fig = random_plot()\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " scale=1,\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = 
chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/tuples_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append({\"role\": \"user\", \"content\": {\"path\": x}})\n", " if message[\"text\"] is not None:\n", " history.append({\"role\": \"user\", \"content\": message[\"text\"]})\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "\n", "def bot(history: list):\n", " response = \"**That's cool!**\"\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in response:\n", " history[-1][\"content\"] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(elem_id=\"chatbot\", bubble_full_width=False, type=\"messages\")\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None, like_user_message=True)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatbot_multimodal/run.py b/demo/chatbot_multimodal/run.py index c4919c5e9223b..ac0cb18805246 100644 --- a/demo/chatbot_multimodal/run.py +++ b/demo/chatbot_multimodal/run.py @@ -1,46 +1,47 @@ import gradio as gr -import plotly.express as px +import time # Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text. 
-def random_plot(): - df = px.data.iris() - fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species", - size='petal_length', hover_data=['petal_width']) - return fig def print_like_dislike(x: gr.LikeData): print(x.index, x.value, x.liked) + def add_message(history, message): for x in message["files"]: - history.append(((x,), None)) + history.append({"role": "user", "content": {"path": x}}) if message["text"] is not None: - history.append((message["text"], None)) + history.append({"role": "user", "content": message["text"]}) return history, gr.MultimodalTextbox(value=None, interactive=False) -def bot(history): - history[-1][1] = "Cool!" - return history -fig = random_plot() +def bot(history: list): + response = "**That's cool!**" + history.append({"role": "assistant", "content": ""}) + for character in response: + history[-1]["content"] += character + time.sleep(0.05) + yield history -with gr.Blocks(fill_height=True) as demo: - chatbot = gr.Chatbot( - elem_id="chatbot", - bubble_full_width=False, - scale=1, - ) - chat_input = gr.MultimodalTextbox(interactive=True, - file_count="multiple", - placeholder="Enter message or upload file...", show_label=False) +with gr.Blocks() as demo: + chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages") - chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input]) + chat_input = gr.MultimodalTextbox( + interactive=True, + file_count="multiple", + placeholder="Enter message or upload file...", + show_label=False, + ) + + chat_msg = chat_input.submit( + add_message, [chatbot, chat_input], [chatbot, chat_input] + ) bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response") bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input]) - chatbot.like(print_like_dislike, None, None) + chatbot.like(print_like_dislike, None, None, like_user_message=True) if __name__ == "__main__": demo.launch() diff --git 
a/demo/chatbot_multimodal/messages_testcase.py b/demo/chatbot_multimodal/tuples_testcase.py similarity index 69% rename from demo/chatbot_multimodal/messages_testcase.py rename to demo/chatbot_multimodal/tuples_testcase.py index bd003af9a6788..c4919c5e9223b 100644 --- a/demo/chatbot_multimodal/messages_testcase.py +++ b/demo/chatbot_multimodal/tuples_testcase.py @@ -1,32 +1,35 @@ import gradio as gr -import time +import plotly.express as px # Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text. +def random_plot(): + df = px.data.iris() + fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species", + size='petal_length', hover_data=['petal_width']) + return fig + def print_like_dislike(x: gr.LikeData): print(x.index, x.value, x.liked) def add_message(history, message): for x in message["files"]: - history.append({"role": "user", "content": {"path": x}}) + history.append(((x,), None)) if message["text"] is not None: - history.append({"role": "user", "content": message["text"]}) + history.append((message["text"], None)) return history, gr.MultimodalTextbox(value=None, interactive=False) -def bot(history: list): - response = "**That's cool!**" - history.append({"role": "assistant", "content": ""}) - for character in response: - history[-1]['content'] += character - time.sleep(0.05) - yield history +def bot(history): + history[-1][1] = "Cool!" 
+ return history + +fig = random_plot() -with gr.Blocks() as demo: +with gr.Blocks(fill_height=True) as demo: chatbot = gr.Chatbot( - [], elem_id="chatbot", bubble_full_width=False, - type="messages" + scale=1, ) chat_input = gr.MultimodalTextbox(interactive=True, diff --git a/demo/chatbot_retry_undo_like/requirements.txt b/demo/chatbot_retry_undo_like/requirements.txt new file mode 100644 index 0000000000000..6b964ccca3c1b --- /dev/null +++ b/demo/chatbot_retry_undo_like/requirements.txt @@ -0,0 +1 @@ +huggingface_hub diff --git a/demo/chatbot_retry_undo_like/run.ipynb b/demo/chatbot_retry_undo_like/run.ipynb new file mode 100644 index 0000000000000..6606a81db7b40 --- /dev/null +++ b/demo/chatbot_retry_undo_like/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_retry_undo_like"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio huggingface_hub "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from huggingface_hub import InferenceClient\n", "import gradio as gr\n", "\n", "client = InferenceClient()\n", "\n", "def respond(\n", " prompt: str,\n", " history,\n", "):\n", " if not history:\n", " history = [{\"role\": \"system\", \"content\": \"You are a friendly chatbot\"}]\n", " history.append({\"role\": \"user\", \"content\": prompt})\n", "\n", " yield history\n", "\n", " response = {\"role\": \"assistant\", \"content\": \"\"}\n", " for message in client.chat_completion( # type: ignore\n", " history,\n", " temperature=0.95,\n", " top_p=0.9,\n", " max_tokens=512,\n", " stream=True,\n", " model=\"HuggingFaceH4/zephyr-7b-beta\"\n", " ):\n", " response[\"content\"] += message.choices[0].delta.content or \"\"\n", " yield history + [response]\n", "\n", "\n", 
"def handle_undo(history, undo_data: gr.UndoData):\n", " return history[:undo_data.index], history[undo_data.index]['content']\n", "\n", "def handle_retry(history, retry_data: gr.RetryData):\n", " new_history = history[:retry_data.index]\n", " previous_prompt = history[retry_data.index]['content']\n", " yield from respond(previous_prompt, new_history)\n", "\n", "\n", "def handle_like(data: gr.LikeData):\n", " if data.liked:\n", " print(\"You upvoted this response: \", data.value)\n", " else:\n", " print(\"You downvoted this response: \", data.value)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"# Chat with Hugging Face Zephyr 7b \ud83e\udd17\")\n", " chatbot = gr.Chatbot(\n", " label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(\n", " None,\n", " \"https://em-content.zobj.net/source/twitter/376/hugging-face_1f917.png\",\n", " ),\n", " )\n", " prompt = gr.Textbox(max_lines=1, label=\"Chat Message\")\n", " prompt.submit(respond, [prompt, chatbot], [chatbot])\n", " prompt.submit(lambda: \"\", None, [prompt])\n", " chatbot.undo(handle_undo, chatbot, [chatbot, prompt])\n", " chatbot.retry(handle_retry, chatbot, [chatbot])\n", " chatbot.like(handle_like, None, None)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatbot_retry_undo_like/run.py b/demo/chatbot_retry_undo_like/run.py new file mode 100644 index 0000000000000..5defaec383e7c --- /dev/null +++ b/demo/chatbot_retry_undo_like/run.py @@ -0,0 +1,64 @@ +from huggingface_hub import InferenceClient +import gradio as gr + +client = InferenceClient() + +def respond( + prompt: str, + history, +): + if not history: + history = [{"role": "system", "content": "You are a friendly chatbot"}] + history.append({"role": "user", "content": prompt}) + + yield history + + response = {"role": "assistant", "content": ""} + for message in client.chat_completion( # type: ignore + 
history, + temperature=0.95, + top_p=0.9, + max_tokens=512, + stream=True, + model="HuggingFaceH4/zephyr-7b-beta" + ): + response["content"] += message.choices[0].delta.content or "" + yield history + [response] + + +def handle_undo(history, undo_data: gr.UndoData): + return history[:undo_data.index], history[undo_data.index]['content'] + +def handle_retry(history, retry_data: gr.RetryData): + new_history = history[:retry_data.index] + previous_prompt = history[retry_data.index]['content'] + yield from respond(previous_prompt, new_history) + + +def handle_like(data: gr.LikeData): + if data.liked: + print("You upvoted this response: ", data.value) + else: + print("You downvoted this response: ", data.value) + + +with gr.Blocks() as demo: + gr.Markdown("# Chat with Hugging Face Zephyr 7b 🤗") + chatbot = gr.Chatbot( + label="Agent", + type="messages", + avatar_images=( + None, + "https://em-content.zobj.net/source/twitter/376/hugging-face_1f917.png", + ), + ) + prompt = gr.Textbox(max_lines=1, label="Chat Message") + prompt.submit(respond, [prompt, chatbot], [chatbot]) + prompt.submit(lambda: "", None, [prompt]) + chatbot.undo(handle_undo, chatbot, [chatbot, prompt]) + chatbot.retry(handle_retry, chatbot, [chatbot]) + chatbot.like(handle_like, None, None) + + +if __name__ == "__main__": + demo.launch() diff --git a/demo/chatbot_simple/run.ipynb b/demo/chatbot_simple/run.ipynb index 9085788db1004..04839e78c6347 100644 --- a/demo/chatbot_simple/run.ipynb +++ b/demo/chatbot_simple/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as 
gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot()\n", " msg = gr.Textbox()\n", " clear = gr.ClearButton([msg, chatbot])\n", "\n", " def respond(message, chat_history):\n", " bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n", " chat_history.append((message, bot_message))\n", " time.sleep(2)\n", " return \"\", chat_history\n", "\n", " msg.submit(respond, [msg, chatbot], [msg, chatbot])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(type=\"messages\")\n", " msg = gr.Textbox()\n", " clear = gr.ClearButton([msg, chatbot])\n", "\n", " def respond(message, chat_history):\n", " bot_message = random.choice([\"How are you?\", \"Today is a great day\", \"I'm very hungry\"])\n", " chat_history.append({\"role\": \"user\", \"content\": message})\n", " chat_history.append({\"role\": \"assistant\", \"content\": bot_message})\n", " time.sleep(2)\n", " return \"\", chat_history\n", "\n", " msg.submit(respond, [msg, chatbot], [msg, chatbot])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatbot_simple/run.py b/demo/chatbot_simple/run.py index 9344665c7332e..7fcdd00f467e8 100644 --- a/demo/chatbot_simple/run.py +++ 
b/demo/chatbot_simple/run.py @@ -3,13 +3,14 @@ import time with gr.Blocks() as demo: - chatbot = gr.Chatbot() + chatbot = gr.Chatbot(type="messages") msg = gr.Textbox() clear = gr.ClearButton([msg, chatbot]) def respond(message, chat_history): - bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"]) - chat_history.append((message, bot_message)) + bot_message = random.choice(["How are you?", "Today is a great day", "I'm very hungry"]) + chat_history.append({"role": "user", "content": message}) + chat_history.append({"role": "assistant", "content": bot_message}) time.sleep(2) return "", chat_history diff --git a/demo/chatbot_streaming/run.ipynb b/demo/chatbot_streaming/run.ipynb index 0ac39e6b04d7a..3f45e9bb42260 100644 --- a/demo/chatbot_streaming/run.ipynb +++ b/demo/chatbot_streaming/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_streaming"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_streaming/testcase_messages.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot()\n", " msg = gr.Textbox()\n", " clear = gr.Button(\"Clear\")\n", "\n", " def user(user_message, history):\n", " return \"\", history + [[user_message, None]]\n", "\n", " def bot(history):\n", " bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very 
hungry\"])\n", " history[-1][1] = \"\"\n", " for character in bot_message:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", " msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " clear.click(lambda: None, None, chatbot, queue=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_streaming"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(type=\"messages\")\n", " msg = gr.Textbox()\n", " clear = gr.Button(\"Clear\")\n", "\n", " def user(user_message, history: list):\n", " return \"\", history + [{\"role\": \"user\", \"content\": user_message}]\n", "\n", " def bot(history: list):\n", " bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in bot_message:\n", " history[-1]['content'] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", " msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " clear.click(lambda: None, None, chatbot, queue=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatbot_streaming/run.py b/demo/chatbot_streaming/run.py index 
37a10c60c26a4..a38cd6cd27cfd 100644 --- a/demo/chatbot_streaming/run.py +++ b/demo/chatbot_streaming/run.py @@ -3,18 +3,18 @@ import time with gr.Blocks() as demo: - chatbot = gr.Chatbot() + chatbot = gr.Chatbot(type="messages") msg = gr.Textbox() clear = gr.Button("Clear") - def user(user_message, history): - return "", history + [[user_message, None]] + def user(user_message, history: list): + return "", history + [{"role": "user", "content": user_message}] - def bot(history): + def bot(history: list): bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"]) - history[-1][1] = "" + history.append({"role": "assistant", "content": ""}) for character in bot_message: - history[-1][1] += character + history[-1]['content'] += character time.sleep(0.05) yield history diff --git a/demo/chatbot_streaming/testcase_messages.py b/demo/chatbot_streaming/testcase_messages.py deleted file mode 100644 index a38cd6cd27cfd..0000000000000 --- a/demo/chatbot_streaming/testcase_messages.py +++ /dev/null @@ -1,27 +0,0 @@ -import gradio as gr -import random -import time - -with gr.Blocks() as demo: - chatbot = gr.Chatbot(type="messages") - msg = gr.Textbox() - clear = gr.Button("Clear") - - def user(user_message, history: list): - return "", history + [{"role": "user", "content": user_message}] - - def bot(history: list): - bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"]) - history.append({"role": "assistant", "content": ""}) - for character in bot_message: - history[-1]['content'] += character - time.sleep(0.05) - yield history - - msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( - bot, chatbot, chatbot - ) - clear.click(lambda: None, None, chatbot, queue=False) - -if __name__ == "__main__": - demo.launch() diff --git a/demo/chatinterface_multimodal/files/avatar.png b/demo/chatinterface_multimodal/files/avatar.png new file mode 100644 index 0000000000000..8f1df7156f0a6 Binary files /dev/null and 
b/demo/chatinterface_multimodal/files/avatar.png differ diff --git a/demo/chatinterface_multimodal/files/cantina.wav b/demo/chatinterface_multimodal/files/cantina.wav new file mode 100644 index 0000000000000..41f0204384682 Binary files /dev/null and b/demo/chatinterface_multimodal/files/cantina.wav differ diff --git a/demo/chatinterface_multimodal/run.ipynb b/demo/chatinterface_multimodal/run.ipynb index 469651b5ea9c5..5b8f0148ca98b 100644 --- a/demo/chatinterface_multimodal/run.ipynb +++ b/demo/chatinterface_multimodal/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def echo(message, history):\n", " return message[\"text\"]\n", "\n", "demo = gr.ChatInterface(\n", " fn=echo,\n", " examples=[{\"text\": \"hello\"}, {\"text\": \"hola\"}, {\"text\": \"merhaba\"}],\n", " title=\"Echo Bot\",\n", " multimodal=True,\n", ")\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png 
https://github.com/gradio-app/gradio/raw/main/demo/chatinterface_multimodal/files/avatar.png\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/chatinterface_multimodal/files/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def echo(message, history):\n", " return message[\"text\"]\n", "\n", "demo = gr.ChatInterface(\n", " fn=echo,\n", " type=\"messages\",\n", " examples=[{\"text\": \"hello\"}, {\"text\": \"hola\"}, {\"text\": \"merhaba\"}],\n", " title=\"Echo Bot\",\n", " multimodal=True,\n", ")\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatinterface_multimodal/run.py b/demo/chatinterface_multimodal/run.py index cc4285fd8adf7..65d5925faa6e0 100644 --- a/demo/chatinterface_multimodal/run.py +++ b/demo/chatinterface_multimodal/run.py @@ -5,6 +5,7 @@ def echo(message, history): demo = gr.ChatInterface( fn=echo, + type="messages", examples=[{"text": "hello"}, {"text": "hola"}, {"text": "merhaba"}], title="Echo Bot", multimodal=True, diff --git a/demo/chatinterface_random_response/run.ipynb b/demo/chatinterface_random_response/run.ipynb index dd95ecee051e2..ef4bb5efc029b 100644 --- a/demo/chatinterface_random_response/run.ipynb +++ b/demo/chatinterface_random_response/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_random_response"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import random\n", "import gradio as gr\n", "\n", "def 
random_response(message, history):\n", " return random.choice([\"Yes\", \"No\"])\n", "\n", "demo = gr.ChatInterface(random_response)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_random_response"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import random\n", "import gradio as gr\n", "\n", "def random_response(message, history):\n", " return random.choice([\"Yes\", \"No\"])\n", "\n", "demo = gr.ChatInterface(random_response, type=\"messages\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatinterface_random_response/run.py b/demo/chatinterface_random_response/run.py index 5b060926e1ca2..5640d04b327bb 100644 --- a/demo/chatinterface_random_response/run.py +++ b/demo/chatinterface_random_response/run.py @@ -4,7 +4,7 @@ def random_response(message, history): return random.choice(["Yes", "No"]) -demo = gr.ChatInterface(random_response) +demo = gr.ChatInterface(random_response, type="messages") if __name__ == "__main__": demo.launch() diff --git a/demo/chatinterface_streaming_echo/messages_testcase.py b/demo/chatinterface_streaming_echo/messages_testcase.py deleted file mode 100644 index 6e87b7dcc0759..0000000000000 --- a/demo/chatinterface_streaming_echo/messages_testcase.py +++ /dev/null @@ -1,12 +0,0 @@ -import time -import gradio as gr - -def slow_echo(message, history): - for i in range(len(message)): - time.sleep(0.05) - yield "You typed: " + 
message[: i + 1] - -demo = gr.ChatInterface(slow_echo, type="messages") - -if __name__ == "__main__": - demo.launch() diff --git a/demo/chatinterface_streaming_echo/run.ipynb b/demo/chatinterface_streaming_echo/run.ipynb index 43d310e0cdb69..d077e6dcccf98 100644 --- a/demo/chatinterface_streaming_echo/run.ipynb +++ b/demo/chatinterface_streaming_echo/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_streaming_echo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatinterface_streaming_echo/messages_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import time\n", "import gradio as gr\n", "\n", "def slow_echo(message, history):\n", " for i in range(len(message)):\n", " time.sleep(0.05)\n", " yield \"You typed: \" + message[: i + 1]\n", "\n", "demo = gr.ChatInterface(slow_echo)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_streaming_echo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], 
"source": ["import time\n", "import gradio as gr\n", "\n", "def slow_echo(message, history):\n", " for i in range(len(message)):\n", " time.sleep(0.05)\n", " yield \"You typed: \" + message[: i + 1]\n", "\n", "demo = gr.ChatInterface(slow_echo, type=\"messages\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatinterface_streaming_echo/run.py b/demo/chatinterface_streaming_echo/run.py index 7b5d9bcb11778..6e87b7dcc0759 100644 --- a/demo/chatinterface_streaming_echo/run.py +++ b/demo/chatinterface_streaming_echo/run.py @@ -6,7 +6,7 @@ def slow_echo(message, history): time.sleep(0.05) yield "You typed: " + message[: i + 1] -demo = gr.ChatInterface(slow_echo) +demo = gr.ChatInterface(slow_echo, type="messages") if __name__ == "__main__": demo.launch() diff --git a/demo/chatinterface_system_prompt/run.ipynb b/demo/chatinterface_system_prompt/run.ipynb index d6b62ddf986b6..514d69cfa1c9b 100644 --- a/demo/chatinterface_system_prompt/run.ipynb +++ b/demo/chatinterface_system_prompt/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_system_prompt"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "def echo(message, history, system_prompt, tokens):\n", " response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n", " for i in range(min(len(response), int(tokens))):\n", " time.sleep(0.05)\n", " yield response[: i + 1]\n", "\n", "demo = gr.ChatInterface(\n", " echo,\n", " additional_inputs=[\n", " gr.Textbox(\"You are helpful AI.\", 
label=\"System Prompt\"),\n", " gr.Slider(10, 100),\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_system_prompt"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "def echo(message, history, system_prompt, tokens):\n", " response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n", " for i in range(min(len(response), int(tokens))):\n", " time.sleep(0.05)\n", " yield response[: i + 1]\n", "\n", "demo = gr.ChatInterface(\n", " echo,\n", " type=\"messages\",\n", " additional_inputs=[\n", " gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"),\n", " gr.Slider(10, 100),\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/chatinterface_system_prompt/run.py b/demo/chatinterface_system_prompt/run.py index ce824d71c6961..f74320ac6e180 100644 --- a/demo/chatinterface_system_prompt/run.py +++ b/demo/chatinterface_system_prompt/run.py @@ -9,6 +9,7 @@ def echo(message, history, system_prompt, tokens): demo = gr.ChatInterface( echo, + type="messages", additional_inputs=[ gr.Textbox("You are helpful AI.", label="System Prompt"), gr.Slider(10, 100), diff --git a/demo/chicago-bikeshare-dashboard/requirements.txt b/demo/chicago-bikeshare-dashboard/requirements.txt index aa9de6427ec58..5facfef0cc151 100644 --- a/demo/chicago-bikeshare-dashboard/requirements.txt +++ 
b/demo/chicago-bikeshare-dashboard/requirements.txt @@ -1,3 +1,4 @@ psycopg2 matplotlib SQLAlchemy +pandas diff --git a/demo/chicago-bikeshare-dashboard/run.ipynb b/demo/chicago-bikeshare-dashboard/run.ipynb index fab526fa16def..4b4b7e3bfc55f 100644 --- a/demo/chicago-bikeshare-dashboard/run.ipynb +++ b/demo/chicago-bikeshare-dashboard/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chicago-bikeshare-dashboard"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio psycopg2 matplotlib SQLAlchemy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import os\n", "import gradio as gr\n", "import pandas as pd\n", "\n", "DB_USER = os.getenv(\"DB_USER\")\n", "DB_PASSWORD = os.getenv(\"DB_PASSWORD\")\n", "DB_HOST = os.getenv(\"DB_HOST\")\n", "PORT = 8080\n", "DB_NAME = \"bikeshare\"\n", "\n", "connection_string = (\n", " f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n", ")\n", "\n", "def get_count_ride_type():\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, rideable_type\n", " FROM rides\n", " GROUP BY rideable_type\n", " ORDER BY n DESC\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "def get_most_popular_stations():\n", "\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n", " FROM RIDES\n", " WHERE start_station_name is NOT NULL\n", " GROUP BY start_station_id\n", " ORDER BY n DESC\n", " LIMIT 5\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " # Chicago Bike Share Dashboard\n", " \n", " This demo pulls Chicago bike share data for March 
2022 from a postgresql database hosted on AWS.\n", " This demo uses psycopg2 but any postgresql client library (SQLAlchemy)\n", " is compatible with gradio.\n", " \n", " Connection credentials are handled by environment variables\n", " defined as secrets in the Space.\n", "\n", " If data were added to the database, the plots in this demo would update\n", " whenever the webpage is reloaded.\n", " \n", " This demo serves as a starting point for your database-connected apps!\n", " \"\"\"\n", " )\n", " with gr.Row():\n", " bike_type = gr.BarPlot(\n", " x=\"rideable_type\",\n", " y='n',\n", " title=\"Number of rides per bicycle type\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Bicycle Type\",\n", " vertical=False,\n", " tooltip=['rideable_type', \"n\"],\n", " height=300,\n", " width=300,\n", " )\n", " station = gr.BarPlot(\n", " x='station',\n", " y='n',\n", " title=\"Most Popular Stations\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Station Name\",\n", " vertical=False,\n", " tooltip=['station', 'n'],\n", " height=300,\n", " width=300\n", " )\n", "\n", " demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n", " demo.load(get_most_popular_stations, inputs=None, outputs=station)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chicago-bikeshare-dashboard"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio psycopg2 matplotlib SQLAlchemy pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import os\n", "import gradio as gr\n", "import pandas as pd\n", "\n", "DB_USER = os.getenv(\"DB_USER\")\n", "DB_PASSWORD = 
os.getenv(\"DB_PASSWORD\")\n", "DB_HOST = os.getenv(\"DB_HOST\")\n", "PORT = 8080\n", "DB_NAME = \"bikeshare\"\n", "\n", "connection_string = (\n", " f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n", ")\n", "\n", "def get_count_ride_type():\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, rideable_type\n", " FROM rides\n", " GROUP BY rideable_type\n", " ORDER BY n DESC\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "def get_most_popular_stations():\n", "\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n", " FROM RIDES\n", " WHERE start_station_name is NOT NULL\n", " GROUP BY start_station_id\n", " ORDER BY n DESC\n", " LIMIT 5\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " # Chicago Bike Share Dashboard\n", " \n", " This demo pulls Chicago bike share data for March 2022 from a postgresql database hosted on AWS.\n", " This demo uses psycopg2 but any postgresql client library (SQLAlchemy)\n", " is compatible with gradio.\n", " \n", " Connection credentials are handled by environment variables\n", " defined as secrets in the Space.\n", "\n", " If data were added to the database, the plots in this demo would update\n", " whenever the webpage is reloaded.\n", " \n", " This demo serves as a starting point for your database-connected apps!\n", " \"\"\"\n", " )\n", " with gr.Row():\n", " bike_type = gr.BarPlot(\n", " x=\"rideable_type\",\n", " y='n',\n", " title=\"Number of rides per bicycle type\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Bicycle Type\",\n", " vertical=False,\n", " tooltip=['rideable_type', \"n\"],\n", " height=300,\n", " width=300,\n", " )\n", " station = gr.BarPlot(\n", " x='station',\n", " y='n',\n", " title=\"Most Popular Stations\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Station Name\",\n", " 
vertical=False,\n", " tooltip=['station', 'n'],\n", " height=300,\n", " width=300\n", " )\n", "\n", " demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n", " demo.load(get_most_popular_stations, inputs=None, outputs=station)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/clear_components/requirements.txt b/demo/clear_components/requirements.txt new file mode 100644 index 0000000000000..402187013ab1e --- /dev/null +++ b/demo/clear_components/requirements.txt @@ -0,0 +1,3 @@ +numpy +pandas +matplotlib diff --git a/demo/clear_components/run.ipynb b/demo/clear_components/run.ipynb index 1a5e2761288f5..a447c537a1b40 100644 --- a/demo/clear_components/run.ipynb +++ b/demo/clear_components/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = 
fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "file_dir = os.path.join(os.path.abspath(''), \"..\", \"kitchen_sink\", \"files\")\n", "model3d_dir = os.path.join(os.path.abspath(''), \"..\", \"model3D\", \"files\")\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "def random_model3d():\n", " model_3d = random.choice(\n", " [os.path.join(model3d_dir, model) for model in os.listdir(model3d_dir) if model != \"source.txt\"]\n", " )\n", " return model_3d\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), 
label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: os.path.join(file_dir, \"world.mp4\")),\n", " gr.Audio(value=lambda: os.path.join(file_dir, \"cantina.wav\")),\n", " gr.File(\n", " value=lambda: random.choice(\n", " [os.path.join(file_dir, img) for img in os.listdir(file_dir)]\n", " )\n", " ),\n", " gr.Dataframe(\n", " value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"]) # type: ignore\n", " ),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " gr.HTML(\n", " value=lambda: random.choice(\n", " [\n", " '

I am red

',\n", " '

I am blue

',\n", " ]\n", " )\n", " ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=random_model3d),\n", " gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any()) # type: ignore\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " clear_button_and_components = components + [clear]\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy pandas matplotlib "]}, {"cell_type": "code", 
"execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "file_dir = os.path.join(os.path.abspath(''), \"..\", \"kitchen_sink\", \"files\")\n", "model3d_dir = os.path.join(os.path.abspath(''), \"..\", \"model3D\", \"files\")\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": 
\"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "def random_model3d():\n", " model_3d = random.choice(\n", " [os.path.join(model3d_dir, model) for model in os.listdir(model3d_dir) if model != \"source.txt\"]\n", " )\n", " return model_3d\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: os.path.join(file_dir, \"world.mp4\")),\n", " gr.Audio(value=lambda: os.path.join(file_dir, \"cantina.wav\")),\n", " gr.File(\n", " value=lambda: random.choice(\n", " [os.path.join(file_dir, img) for img in os.listdir(file_dir)]\n", " )\n", " ),\n", " 
gr.Dataframe(\n", " value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"]) # type: ignore\n", " ),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " gr.HTML(\n", " value=lambda: random.choice(\n", " [\n", " '

I am red

',\n", " '

I am blue

',\n", " ]\n", " )\n", " ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=random_model3d),\n", " gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any()) # type: ignore\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " clear_button_and_components = components + [clear]\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/code/run.ipynb b/demo/code/run.ipynb index d3339171907cf..e7daf42b04a66 100644 --- a/demo/code/run.ipynb +++ b/demo/code/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: code"]}, {"cell_type": "code", "execution_count": null, "id": 
"272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/code/file.css"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "from time import sleep\n", "\n", "css_file = os.path.join(os.path.abspath(''), \"file.css\")\n", "\n", "def set_lang(language):\n", " print(language)\n", " return gr.Code(language=language)\n", "\n", "def set_lang_from_path():\n", " sleep(1)\n", " return gr.Code((css_file,), language=\"css\")\n", "\n", "def code(language, code):\n", " return gr.Code(code, language=language)\n", "\n", "io = gr.Interface(lambda x: x, \"code\", \"code\")\n", "\n", "with gr.Blocks() as demo:\n", " lang = gr.Dropdown(value=\"python\", choices=gr.Code.languages)\n", " with gr.Row():\n", " code_in = gr.Code(\n", " language=\"python\",\n", " label=\"Input\",\n", " value='def all_odd_elements(sequence):\\n \"\"\"Returns every odd element of the sequence.\"\"\"',\n", " )\n", " code_out = gr.Code(label=\"Output\")\n", " btn = gr.Button(\"Run\")\n", " btn_two = gr.Button(\"Load File\")\n", "\n", " lang.change(set_lang, inputs=lang, outputs=code_in)\n", " btn.click(code, inputs=[lang, code_in], outputs=code_out)\n", " btn_two.click(set_lang_from_path, inputs=None, outputs=code_out)\n", " io.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: code"]}, {"cell_type": "code", "execution_count": null, "id": 
"272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/code/file.css"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "from time import sleep\n", "\n", "css_file = os.path.join(os.path.abspath(''), \"file.css\")\n", "\n", "def set_lang(language):\n", " print(language)\n", " return gr.Code(language=language)\n", "\n", "def set_lang_from_path():\n", " sleep(1)\n", " return gr.Code(open(css_file).read(), language=\"css\")\n", "\n", "def code(language, code):\n", " return gr.Code(code, language=language)\n", "\n", "io = gr.Interface(lambda x: x, \"code\", \"code\")\n", "\n", "with gr.Blocks() as demo:\n", " lang = gr.Dropdown(value=\"python\", choices=gr.Code.languages)\n", " with gr.Row():\n", " code_in = gr.Code(\n", " language=\"python\",\n", " label=\"Input\",\n", " value='def all_odd_elements(sequence):\\n \"\"\"Returns every odd element of the sequence.\"\"\"',\n", " )\n", " code_out = gr.Code(label=\"Output\")\n", " btn = gr.Button(\"Run\")\n", " btn_two = gr.Button(\"Load File\")\n", "\n", " lang.change(set_lang, inputs=lang, outputs=code_in)\n", " btn.click(code, inputs=[lang, code_in], outputs=code_out)\n", " btn_two.click(set_lang_from_path, inputs=None, outputs=code_out)\n", " io.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/code/run.py b/demo/code/run.py index 2e0156bb9ec01..03cbad845295d 100644 --- a/demo/code/run.py +++ b/demo/code/run.py @@ -10,7 +10,7 @@ def set_lang(language): def 
set_lang_from_path(): sleep(1) - return gr.Code((css_file,), language="css") + return gr.Code(open(css_file).read(), language="css") def code(language, code): return gr.Code(code, language=language) diff --git a/demo/dashboard/requirements.txt b/demo/dashboard/requirements.txt index d42d0ad03bdf8..b68957b9f62af 100644 --- a/demo/dashboard/requirements.txt +++ b/demo/dashboard/requirements.txt @@ -1 +1,2 @@ -plotly \ No newline at end of file +plotly +pandas diff --git a/demo/dashboard/run.ipynb b/demo/dashboard/run.ipynb index 0b92a9368b858..eb1cddd198878 100644 --- a/demo/dashboard/run.ipynb +++ b/demo/dashboard/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import retrieve_pip_installs, retrieve_stars, retrieve_issues\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", 
\"transformers\"]\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, show_label=False)\n", " with gr.Column():\n", " gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", 
\"Cumulated\"], show_label=False)\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], show_label=False)\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], show_label=False)\n", " with gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. 
Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import retrieve_pip_installs, retrieve_stars, retrieve_issues\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return 
gr.Plot(value=plot, visible=True)\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, show_label=False)\n", " with gr.Column():\n", " gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], show_label=False)\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], show_label=False)\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], show_label=False)\n", " with gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/dataframe_colorful/requirements.txt b/demo/dataframe_colorful/requirements.txt new file mode 100644 index 
0000000000000..fb6c7ed7ec60d --- /dev/null +++ b/demo/dataframe_colorful/requirements.txt @@ -0,0 +1 @@ +pandas diff --git a/demo/dataframe_colorful/run.ipynb b/demo/dataframe_colorful/run.ipynb index e9181aab72819..e4ff9dc885bce 100644 --- a/demo/dataframe_colorful/run.ipynb +++ b/demo/dataframe_colorful/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataframe_colorful"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "import gradio as gr\n", "\n", "df = pd.DataFrame(\n", " {\n", " \"A\": [14, 4, 5, 4, 1],\n", " \"B\": [5, 2, 54, 3, 2],\n", " \"C\": [20, 20, 7, 3, 8],\n", " \"D\": [14, 3, 6, 2, 6],\n", " \"E\": [23, 45, 64, 32, 23],\n", " }\n", ")\n", "\n", "t = df.style.highlight_max(color=\"lightgreen\", axis=0)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Dataframe(t)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataframe_colorful"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "import gradio as gr\n", "\n", "df = pd.DataFrame(\n", " {\n", " \"A\": [14, 4, 5, 4, 1],\n", " \"B\": [5, 2, 54, 3, 2],\n", " \"C\": [20, 20, 7, 3, 8],\n", " \"D\": [14, 3, 6, 2, 6],\n", " 
\"E\": [23, 45, 64, 32, 23],\n", " }\n", ")\n", "\n", "t = df.style.highlight_max(color=\"lightgreen\", axis=0)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Dataframe(t)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/dataframe_datatype/requirements.txt b/demo/dataframe_datatype/requirements.txt new file mode 100644 index 0000000000000..5da331cf67f41 --- /dev/null +++ b/demo/dataframe_datatype/requirements.txt @@ -0,0 +1,2 @@ +numpy +pandas diff --git a/demo/dataframe_datatype/run.ipynb b/demo/dataframe_datatype/run.ipynb index 926524b56beda..2d3f95f067a3b 100644 --- a/demo/dataframe_datatype/run.ipynb +++ b/demo/dataframe_datatype/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataframe_datatype"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import numpy as np\n", "\n", "def make_dataframe(n_periods):\n", " rng = np.random.default_rng()\n", " return pd.DataFrame({\"date_1\": pd.date_range(\"2021-01-01\", periods=n_periods),\n", " \"date_2\": pd.date_range(\"2022-02-15\", periods=n_periods).strftime('%B %d, %Y, %r'),\n", " \"number\": rng.random(n_periods).astype(np.float64),\n", " \"number_2\": rng.integers(0, 100, n_periods).astype(np.int32),\n", " \"bool\": [True] * n_periods,\n", " \"markdown\": [\"# Hello\"] * n_periods})\n", "\n", "demo = gr.Interface(make_dataframe,\n", " gr.Number(precision=0),\n", " gr.Dataframe(datatype=[\"date\", \"date\", \"number\", \"number\", \"bool\", \"markdown\"]))\n", "\n", "if __name__ == 
\"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataframe_datatype"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import numpy as np\n", "\n", "def make_dataframe(n_periods):\n", " rng = np.random.default_rng()\n", " return pd.DataFrame({\"date_1\": pd.date_range(\"2021-01-01\", periods=n_periods),\n", " \"date_2\": pd.date_range(\"2022-02-15\", periods=n_periods).strftime('%B %d, %Y, %r'),\n", " \"number\": rng.random(n_periods).astype(np.float64),\n", " \"number_2\": rng.integers(0, 100, n_periods).astype(np.int32),\n", " \"bool\": [True] * n_periods,\n", " \"markdown\": [\"# Hello\"] * n_periods})\n", "\n", "demo = gr.Interface(make_dataframe,\n", " gr.Number(precision=0),\n", " gr.Dataframe(datatype=[\"date\", \"date\", \"number\", \"number\", \"bool\", \"markdown\"]))\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/dataset/requirements.txt b/demo/dataset/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/dataset/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/dataset/run.ipynb b/demo/dataset/run.ipynb index 44090bff10fbf..862997f8fa319 100644 --- a/demo/dataset/run.ipynb +++ b/demo/dataset/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataset"]}, {"cell_type": "code", 
"execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/Bunny.obj\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/cantina.wav\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/cheetah1.jpg\n", "!wget -q -O files/time.csv https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/time.csv\n", "!wget -q -O files/titanic.csv https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/titanic.csv\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import numpy as np\n", "\n", "txt = \"the quick brown fox\"\n", "num = 10\n", "\n", "img = os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\")\n", "vid = os.path.join(os.path.abspath(''), \"files/world.mp4\")\n", "audio = os.path.join(os.path.abspath(''), \"files/cantina.wav\")\n", "csv = os.path.join(os.path.abspath(''), \"files/time.csv\")\n", "model = os.path.join(os.path.abspath(''), \"files/Bunny.obj\")\n", "\n", "dataframe = [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]]\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"# Dataset previews\")\n", " a = gr.Audio(visible=False)\n", " gr.Dataset(\n", " components=[a],\n", " label=\"Audio\",\n", " samples=[\n", " [audio],\n", " [audio],\n", " [audio],\n", " [audio],\n", " [audio],\n", " [audio],\n", " ],\n", " 
)\n", " c = gr.Checkbox(visible=False)\n", " gr.Dataset(\n", " label=\"Checkbox\",\n", " components=[c],\n", " samples=[[True], [True], [False], [True], [False], [False]],\n", " )\n", "\n", " c_2 = gr.CheckboxGroup(visible=False, choices=['a', 'b', 'c'])\n", " gr.Dataset(\n", " label=\"CheckboxGroup\",\n", " components=[c_2],\n", " samples=[\n", " [[\"a\"]],\n", " [[\"a\", \"b\"]],\n", " [[\"a\", \"b\", \"c\"]],\n", " [[\"b\"]],\n", " [[\"c\"]],\n", " [[\"a\", \"c\"]],\n", " ],\n", " )\n", " c_3 = gr.ColorPicker(visible=False)\n", " gr.Dataset(\n", " label=\"ColorPicker\",\n", " components=[c_3],\n", " samples=[\n", " [\"#FFFFFF\"],\n", " [\"#000000\"],\n", " [\"#FFFFFF\"],\n", " [\"#000000\"],\n", " [\"#FFFFFF\"],\n", " [\"#000000\"],\n", " ],\n", " )\n", " d = gr.DataFrame(visible=False)\n", " gr.Dataset(\n", " components=[d],\n", " label=\"Dataframe\",\n", " samples=[\n", " [np.zeros((3, 3)).tolist()],\n", " [np.ones((2, 2)).tolist()],\n", " [np.random.randint(0, 10, (3, 10)).tolist()],\n", " [np.random.randint(0, 10, (10, 3)).tolist()],\n", " [np.random.randint(0, 10, (10, 10)).tolist()],\n", " ],\n", " )\n", " d_2 = gr.Dropdown(visible=False, choices=[\"one\", \"two\", \"three\"])\n", " gr.Dataset(\n", " components=[d_2],\n", " label=\"Dropdown\",\n", " samples=[[\"one\"], [\"two\"], [\"three\"], [\"one\"], [\"two\"], [\"three\"]],\n", " )\n", " f = gr.File(visible=False)\n", " gr.Dataset(\n", " components=[f],\n", " label=\"File\",\n", " samples=[\n", " [csv],\n", " [csv],\n", " [csv],\n", " [csv],\n", " [csv],\n", " [csv],\n", " ],\n", " )\n", " h = gr.HTML(visible=False)\n", " gr.Dataset(\n", " components=[h],\n", " label=\"HTML\",\n", " samples=[\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " ],\n", " )\n", " i = gr.Image(visible=False)\n", " gr.Dataset(\n", " components=[i],\n", " label=\"Image\",\n", " samples=[[img], [img], [img], [img], [img], [img]],\n", " )\n", " m = gr.Markdown(visible=False)\n", " gr.Dataset(\n", " components=[m],\n", " label=\"Markdown\",\n", " samples=[\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " ],\n", " )\n", " m_2 = gr.Model3D(visible=False)\n", " gr.Dataset(\n", " components=[m_2],\n", " label=\"Model3D\",\n", " samples=[[model], [model], [model], [model], [model], [model]],\n", " )\n", " n = gr.Number(visible=False)\n", " gr.Dataset(\n", " label=\"Number\",\n", " components=[n],\n", " samples=[[1], [1], [1], [1], [1], [1]],\n", " )\n", " r = gr.Radio(visible=False, choices=[\"one\", \"two\", \"three\"])\n", " gr.Dataset(\n", " components=[r],\n", " label=\"Radio\",\n", " samples=[[\"one\"], [\"two\"], [\"three\"], [\"one\"], [\"two\"], [\"three\"]],\n", " )\n", " s = gr.Slider(visible=False)\n", " gr.Dataset(\n", " label=\"Slider\",\n", " components=[s],\n", " samples=[[1], [1], [1], [1], [1], [1]],\n", " )\n", " t = gr.Textbox(visible=False)\n", " gr.Dataset(\n", " label=\"Textbox\",\n", " components=[t],\n", " samples=[\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " ],\n", " )\n", " v = gr.Video(visible=False)\n", " gr.Dataset(\n", " components=[v],\n", " label=\"Video\",\n", " samples=[[vid], [vid], [vid], [vid], [vid], [vid]],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataset"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, 
"outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/Bunny.obj\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/cantina.wav\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/cheetah1.jpg\n", "!wget -q -O files/time.csv https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/time.csv\n", "!wget -q -O files/titanic.csv https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/titanic.csv\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/dataset/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import numpy as np\n", "\n", "txt = \"the quick brown fox\"\n", "num = 10\n", "\n", "img = os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\")\n", "vid = os.path.join(os.path.abspath(''), \"files/world.mp4\")\n", "audio = os.path.join(os.path.abspath(''), \"files/cantina.wav\")\n", "csv = os.path.join(os.path.abspath(''), \"files/time.csv\")\n", "model = os.path.join(os.path.abspath(''), \"files/Bunny.obj\")\n", "\n", "dataframe = [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]]\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"# Dataset previews\")\n", " a = gr.Audio(visible=False)\n", " gr.Dataset(\n", " components=[a],\n", " label=\"Audio\",\n", " samples=[\n", " [audio],\n", " [audio],\n", " [audio],\n", " [audio],\n", " [audio],\n", " [audio],\n", " ],\n", " )\n", " c = gr.Checkbox(visible=False)\n", " gr.Dataset(\n", " 
label=\"Checkbox\",\n", " components=[c],\n", " samples=[[True], [True], [False], [True], [False], [False]],\n", " )\n", "\n", " c_2 = gr.CheckboxGroup(visible=False, choices=['a', 'b', 'c'])\n", " gr.Dataset(\n", " label=\"CheckboxGroup\",\n", " components=[c_2],\n", " samples=[\n", " [[\"a\"]],\n", " [[\"a\", \"b\"]],\n", " [[\"a\", \"b\", \"c\"]],\n", " [[\"b\"]],\n", " [[\"c\"]],\n", " [[\"a\", \"c\"]],\n", " ],\n", " )\n", " c_3 = gr.ColorPicker(visible=False)\n", " gr.Dataset(\n", " label=\"ColorPicker\",\n", " components=[c_3],\n", " samples=[\n", " [\"#FFFFFF\"],\n", " [\"#000000\"],\n", " [\"#FFFFFF\"],\n", " [\"#000000\"],\n", " [\"#FFFFFF\"],\n", " [\"#000000\"],\n", " ],\n", " )\n", " d = gr.DataFrame(visible=False)\n", " gr.Dataset(\n", " components=[d],\n", " label=\"Dataframe\",\n", " samples=[\n", " [np.zeros((3, 3)).tolist()],\n", " [np.ones((2, 2)).tolist()],\n", " [np.random.randint(0, 10, (3, 10)).tolist()],\n", " [np.random.randint(0, 10, (10, 3)).tolist()],\n", " [np.random.randint(0, 10, (10, 10)).tolist()],\n", " ],\n", " )\n", " d_2 = gr.Dropdown(visible=False, choices=[\"one\", \"two\", \"three\"])\n", " gr.Dataset(\n", " components=[d_2],\n", " label=\"Dropdown\",\n", " samples=[[\"one\"], [\"two\"], [\"three\"], [\"one\"], [\"two\"], [\"three\"]],\n", " )\n", " f = gr.File(visible=False)\n", " gr.Dataset(\n", " components=[f],\n", " label=\"File\",\n", " samples=[\n", " [csv],\n", " [csv],\n", " [csv],\n", " [csv],\n", " [csv],\n", " [csv],\n", " ],\n", " )\n", " h = gr.HTML(visible=False)\n", " gr.Dataset(\n", " components=[h],\n", " label=\"HTML\",\n", " samples=[\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " [\"

hi

\"],\n", " ],\n", " )\n", " i = gr.Image(visible=False)\n", " gr.Dataset(\n", " components=[i],\n", " label=\"Image\",\n", " samples=[[img], [img], [img], [img], [img], [img]],\n", " )\n", " m = gr.Markdown(visible=False)\n", " gr.Dataset(\n", " components=[m],\n", " label=\"Markdown\",\n", " samples=[\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " [\"# hi\"],\n", " ],\n", " )\n", " m_2 = gr.Model3D(visible=False)\n", " gr.Dataset(\n", " components=[m_2],\n", " label=\"Model3D\",\n", " samples=[[model], [model], [model], [model], [model], [model]],\n", " )\n", " n = gr.Number(visible=False)\n", " gr.Dataset(\n", " label=\"Number\",\n", " components=[n],\n", " samples=[[1], [1], [1], [1], [1], [1]],\n", " )\n", " r = gr.Radio(visible=False, choices=[\"one\", \"two\", \"three\"])\n", " gr.Dataset(\n", " components=[r],\n", " label=\"Radio\",\n", " samples=[[\"one\"], [\"two\"], [\"three\"], [\"one\"], [\"two\"], [\"three\"]],\n", " )\n", " s = gr.Slider(visible=False)\n", " gr.Dataset(\n", " label=\"Slider\",\n", " components=[s],\n", " samples=[[1], [1], [1], [1], [1], [1]],\n", " )\n", " t = gr.Textbox(visible=False)\n", " gr.Dataset(\n", " label=\"Textbox\",\n", " components=[t],\n", " samples=[\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " [\"Some value\"],\n", " ],\n", " )\n", " v = gr.Video(visible=False)\n", " gr.Dataset(\n", " components=[v],\n", " label=\"Video\",\n", " samples=[[vid], [vid], [vid], [vid], [vid], [vid]],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/depth_estimation/run.ipynb b/demo/depth_estimation/run.ipynb index d3023183dcf1d..8b17597f3d703 100644 --- a/demo/depth_estimation/run.ipynb +++ b/demo/depth_estimation/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": 
"302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: depth_estimation\n", "### A demo for predicting the depth of an image and generating a 3D model of it.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers numpy Pillow jinja2 open3d"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/packages.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import DPTFeatureExtractor, DPTForDepthEstimation\n", "import torch\n", "import numpy as np\n", "from PIL import Image\n", "import open3d as o3d\n", "from pathlib import Path\n", "\n", "feature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\n", "model = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n", "\n", "def process_image(image_path):\n", " image_path = Path(image_path)\n", " image_raw = Image.open(image_path)\n", " image = image_raw.resize(\n", " (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n", " Image.Resampling.LANCZOS)\n", "\n", " # prepare image for the model\n", " encoding = feature_extractor(image, return_tensors=\"pt\") # type: ignore\n", "\n", " # forward pass\n", " with torch.no_grad():\n", " outputs = model(**encoding) # type: ignore\n", " predicted_depth 
= outputs.predicted_depth\n", "\n", " # interpolate to original size\n", " prediction = torch.nn.functional.interpolate(\n", " predicted_depth.unsqueeze(1),\n", " size=image.size[::-1],\n", " mode=\"bicubic\",\n", " align_corners=False,\n", " ).squeeze()\n", " output = prediction.cpu().numpy()\n", " depth_image = (output * 255 / np.max(output)).astype('uint8')\n", " try:\n", " gltf_path = create_3d_obj(np.array(image), depth_image, image_path)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except Exception:\n", " gltf_path = create_3d_obj(\n", " np.array(image), depth_image, image_path, depth=8)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except:\n", " print(\"Error reconstructing 3D model\")\n", " raise Exception(\"Error reconstructing 3D model\")\n", "\n", "def create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n", " depth_o3d = o3d.geometry.Image(depth_image)\n", " image_o3d = o3d.geometry.Image(rgb_image)\n", " rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", " image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n", " w = int(depth_image.shape[1])\n", " h = int(depth_image.shape[0])\n", "\n", " camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n", " camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n", "\n", " pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", " rgbd_image, camera_intrinsic)\n", "\n", " print('normals')\n", " pcd.normals = o3d.utility.Vector3dVector(\n", " np.zeros((1, 3))) # invalidate existing normals\n", " pcd.estimate_normals(\n", " search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n", " pcd.orient_normals_towards_camera_location(\n", " camera_location=np.array([0., 0., 1000.]))\n", " pcd.transform([[1, 0, 0, 0],\n", " [0, -1, 0, 0],\n", " [0, 0, -1, 0],\n", " [0, 0, 0, 1]])\n", " pcd.transform([[-1, 0, 0, 0],\n", " [0, 1, 0, 0],\n", " [0, 0, 1, 0],\n", " [0, 0, 0, 1]])\n", "\n", " 
print('run Poisson surface reconstruction')\n", " with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n", " mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n", " pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n", "\n", " voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n", " print(f'voxel_size = {voxel_size:e}')\n", " mesh = mesh_raw.simplify_vertex_clustering(\n", " voxel_size=voxel_size,\n", " contraction=o3d.geometry.SimplificationContraction.Average)\n", "\n", " # vertices_to_remove = densities < np.quantile(densities, 0.001)\n", " # mesh.remove_vertices_by_mask(vertices_to_remove)\n", " bbox = pcd.get_axis_aligned_bounding_box()\n", " mesh_crop = mesh.crop(bbox)\n", " gltf_path = f'./{image_path.stem}.gltf'\n", " o3d.io.write_triangle_mesh(\n", " gltf_path, mesh_crop, write_triangle_uvs=True)\n", " return gltf_path\n", "\n", "title = \"Demo: zero-shot depth estimation with DPT + 3D Point Cloud\"\n", "description = \"This demo is a variation from the original DPT Demo. 
It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\n", "examples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n", "\n", "iface = gr.Interface(fn=process_image,\n", " inputs=[gr.Image(\n", " type=\"filepath\", label=\"Input Image\")],\n", " outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n", " gr.Model3D(label=\"3d mesh reconstruction\", clear_color=(\n", " 1.0, 1.0, 1.0, 1.0)),\n", " gr.File(label=\"3d gLTF\")],\n", " title=title,\n", " description=description,\n", " examples=examples,\n", " allow_flagging=\"never\",\n", " cache_examples=False)\n", "\n", "iface.launch(debug=True)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: depth_estimation\n", "### A demo for predicting the depth of an image and generating a 3D model of it.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers numpy Pillow jinja2 open3d"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/packages.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import 
DPTFeatureExtractor, DPTForDepthEstimation\n", "import torch\n", "import numpy as np\n", "from PIL import Image\n", "import open3d as o3d\n", "from pathlib import Path\n", "\n", "feature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\n", "model = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n", "\n", "def process_image(image_path):\n", " image_path = Path(image_path)\n", " image_raw = Image.open(image_path)\n", " image = image_raw.resize(\n", " (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n", " Image.Resampling.LANCZOS)\n", "\n", " # prepare image for the model\n", " encoding = feature_extractor(image, return_tensors=\"pt\") # type: ignore\n", "\n", " # forward pass\n", " with torch.no_grad():\n", " outputs = model(**encoding) # type: ignore\n", " predicted_depth = outputs.predicted_depth\n", "\n", " # interpolate to original size\n", " prediction = torch.nn.functional.interpolate(\n", " predicted_depth.unsqueeze(1),\n", " size=image.size[::-1],\n", " mode=\"bicubic\",\n", " align_corners=False,\n", " ).squeeze()\n", " output = prediction.cpu().numpy()\n", " depth_image = (output * 255 / np.max(output)).astype('uint8')\n", " try:\n", " gltf_path = create_3d_obj(np.array(image), depth_image, image_path)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except Exception:\n", " gltf_path = create_3d_obj(\n", " np.array(image), depth_image, image_path, depth=8)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except:\n", " print(\"Error reconstructing 3D model\")\n", " raise Exception(\"Error reconstructing 3D model\")\n", "\n", "def create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n", " depth_o3d = o3d.geometry.Image(depth_image)\n", " image_o3d = o3d.geometry.Image(rgb_image)\n", " rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", " image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n", " w = 
int(depth_image.shape[1])\n", " h = int(depth_image.shape[0])\n", "\n", " camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n", " camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n", "\n", " pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", " rgbd_image, camera_intrinsic)\n", "\n", " print('normals')\n", " pcd.normals = o3d.utility.Vector3dVector(\n", " np.zeros((1, 3))) # invalidate existing normals\n", " pcd.estimate_normals(\n", " search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n", " pcd.orient_normals_towards_camera_location(\n", " camera_location=np.array([0., 0., 1000.]))\n", " pcd.transform([[1, 0, 0, 0],\n", " [0, -1, 0, 0],\n", " [0, 0, -1, 0],\n", " [0, 0, 0, 1]])\n", " pcd.transform([[-1, 0, 0, 0],\n", " [0, 1, 0, 0],\n", " [0, 0, 1, 0],\n", " [0, 0, 0, 1]])\n", "\n", " print('run Poisson surface reconstruction')\n", " with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n", " mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n", " pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n", "\n", " voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n", " print(f'voxel_size = {voxel_size:e}')\n", " mesh = mesh_raw.simplify_vertex_clustering(\n", " voxel_size=voxel_size,\n", " contraction=o3d.geometry.SimplificationContraction.Average)\n", "\n", " # vertices_to_remove = densities < np.quantile(densities, 0.001)\n", " # mesh.remove_vertices_by_mask(vertices_to_remove)\n", " bbox = pcd.get_axis_aligned_bounding_box()\n", " mesh_crop = mesh.crop(bbox)\n", " gltf_path = f'./{image_path.stem}.gltf'\n", " o3d.io.write_triangle_mesh(\n", " gltf_path, mesh_crop, write_triangle_uvs=True)\n", " return gltf_path\n", "\n", "title = \"Demo: zero-shot depth estimation with DPT + 3D Point Cloud\"\n", "description = \"This demo is a variation from the original DPT Demo. 
It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\n", "examples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n", "\n", "iface = gr.Interface(fn=process_image,\n", " inputs=[gr.Image(\n", " type=\"filepath\", label=\"Input Image\")],\n", " outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n", " gr.Model3D(label=\"3d mesh reconstruction\", clear_color=(\n", " 1.0, 1.0, 1.0, 1.0)),\n", " gr.File(label=\"3d gLTF\")],\n", " title=title,\n", " description=description,\n", " examples=examples,\n", " flagging_mode=\"never\",\n", " cache_examples=False)\n", "\n", "iface.launch(debug=True)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/depth_estimation/run.py b/demo/depth_estimation/run.py index e5fcf1ea3716b..50f84544dc521 100644 --- a/demo/depth_estimation/run.py +++ b/demo/depth_estimation/run.py @@ -110,7 +110,7 @@ def create_3d_obj(rgb_image, depth_image, image_path, depth=10): title=title, description=description, examples=examples, - allow_flagging="never", + flagging_mode="never", cache_examples=False) iface.launch(debug=True) diff --git a/demo/fake_diffusion_with_gif/requirements.txt b/demo/fake_diffusion_with_gif/requirements.txt new file mode 100644 index 0000000000000..6dd520b61558d --- /dev/null +++ b/demo/fake_diffusion_with_gif/requirements.txt @@ -0,0 +1,3 @@ +numpy +requests +Pillow diff --git a/demo/fake_diffusion_with_gif/run.ipynb b/demo/fake_diffusion_with_gif/run.ipynb index 91a2905550b70..5f0600aff358d 100644 --- a/demo/fake_diffusion_with_gif/run.ipynb +++ b/demo/fake_diffusion_with_gif/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fake_diffusion_with_gif"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip 
install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "import os\n", "from PIL import Image\n", "import requests\n", "from io import BytesIO\n", "\n", "def create_gif(images):\n", " pil_images = []\n", " for image in images:\n", " if isinstance(image, str):\n", " response = requests.get(image)\n", " image = Image.open(BytesIO(response.content))\n", " else:\n", " image = Image.fromarray((image * 255).astype(np.uint8))\n", " pil_images.append(image)\n", " fp_out = os.path.join(os.path.abspath(''), \"image.gif\")\n", " img = pil_images.pop(0)\n", " img.save(fp=fp_out, format='GIF', append_images=pil_images,\n", " save_all=True, duration=400, loop=0)\n", " return fp_out\n", "\n", "def fake_diffusion(steps):\n", " rng = np.random.default_rng()\n", " images = []\n", " for _ in range(steps):\n", " time.sleep(1)\n", " image = rng.random((600, 600, 3))\n", " images.append(image)\n", " yield image, gr.Image(visible=False)\n", "\n", " time.sleep(1)\n", " image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n", " images.append(image)\n", " gif_path = create_gif(images)\n", "\n", " yield image, gr.Image(value=gif_path, visible=True)\n", "\n", "demo = gr.Interface(fake_diffusion,\n", " inputs=gr.Slider(1, 10, 3, step=1),\n", " outputs=[\"image\", gr.Image(label=\"All Images\", visible=False)])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fake_diffusion_with_gif"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy requests Pillow "]}, 
{"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "import os\n", "from PIL import Image\n", "import requests\n", "from io import BytesIO\n", "\n", "def create_gif(images):\n", " pil_images = []\n", " for image in images:\n", " if isinstance(image, str):\n", " response = requests.get(image)\n", " image = Image.open(BytesIO(response.content))\n", " else:\n", " image = Image.fromarray((image * 255).astype(np.uint8))\n", " pil_images.append(image)\n", " fp_out = os.path.join(os.path.abspath(''), \"image.gif\")\n", " img = pil_images.pop(0)\n", " img.save(fp=fp_out, format='GIF', append_images=pil_images,\n", " save_all=True, duration=400, loop=0)\n", " return fp_out\n", "\n", "def fake_diffusion(steps):\n", " rng = np.random.default_rng()\n", " images = []\n", " for _ in range(steps):\n", " time.sleep(1)\n", " image = rng.random((600, 600, 3))\n", " images.append(image)\n", " yield image, gr.Image(visible=False)\n", "\n", " time.sleep(1)\n", " image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n", " images.append(image)\n", " gif_path = create_gif(images)\n", "\n", " yield image, gr.Image(value=gif_path, visible=True)\n", "\n", "demo = gr.Interface(fake_diffusion,\n", " inputs=gr.Slider(1, 10, 3, step=1),\n", " outputs=[\"image\", gr.Image(label=\"All Images\", visible=False)])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/file_component_events/run.ipynb b/demo/file_component_events/run.ipynb index 136e1b1a9d6d3..dc29663db5e2c 100644 --- a/demo/file_component_events/run.ipynb +++ b/demo/file_component_events/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: 
file_component_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def delete_file(n: int, file: gr.DeletedFileData):\n", " return [file.file.path, n + 1]\n", "\n", "with gr.Blocks() as demo:\n", "\n", " with gr.Row():\n", " with gr.Column():\n", " file_component = gr.File(label=\"Upload Single File\", file_count=\"single\")\n", " with gr.Column():\n", " output_file_1 = gr.File(\n", " label=\"Upload Single File Output\", file_count=\"single\"\n", " )\n", " num_load_btn_1 = gr.Number(label=\"# Load Upload Single File\", value=0)\n", " file_component.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component, num_load_btn_1],\n", " [output_file_1, num_load_btn_1],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_multiple = gr.File(\n", " label=\"Upload Multiple Files\", file_count=\"multiple\"\n", " )\n", " with gr.Column():\n", " output_file_2 = gr.File(\n", " label=\"Upload Multiple Files Output\", file_count=\"multiple\"\n", " )\n", " num_load_btn_2 = gr.Number(label=\"# Load Upload Multiple Files\", value=0)\n", " file_component_multiple.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_multiple, num_load_btn_2],\n", " [output_file_2, num_load_btn_2],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_specific = gr.File(\n", " label=\"Upload Multiple Files Image/Video\",\n", " file_count=\"multiple\",\n", " file_types=[\"image\", \"video\"],\n", " )\n", " with gr.Column():\n", " output_file_3 = gr.File(\n", " label=\"Upload Multiple Files Output Image/Video\", file_count=\"multiple\"\n", " )\n", " num_load_btn_3 = gr.Number(\n", " label=\"# Load Upload Multiple Files Image/Video\", value=0\n", " )\n", " 
file_component_specific.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_specific, num_load_btn_3],\n", " [output_file_3, num_load_btn_3],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_pdf = gr.File(label=\"Upload PDF File\", file_types=[\"pdf\"])\n", " with gr.Column():\n", " output_file_4 = gr.File(label=\"Upload PDF File Output\")\n", " num_load_btn_4 = gr.Number(label=\"# Load Upload PDF File\", value=0)\n", " file_component_pdf.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_pdf, num_load_btn_4],\n", " [output_file_4, num_load_btn_4],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_invalid = gr.File(\n", " label=\"Upload File with Invalid file_types\",\n", " file_types=[\"invalid file_type\"],\n", " )\n", " with gr.Column():\n", " output_file_5 = gr.File(label=\"Upload File with Invalid file_types Output\")\n", " num_load_btn_5 = gr.Number(\n", " label=\"# Load Upload File with Invalid file_types\", value=0\n", " )\n", " file_component_invalid.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_invalid, num_load_btn_5],\n", " [output_file_5, num_load_btn_5],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " del_file_input = gr.File(label=\"Delete File\", file_count=\"multiple\")\n", " with gr.Column():\n", " del_file_data = gr.Textbox(label=\"Delete file data\")\n", " num_load_btn_6 = gr.Number(label=\"# Deleted File\", value=0)\n", " del_file_input.delete(\n", " delete_file,\n", " [num_load_btn_6],\n", " [del_file_data, num_load_btn_6],\n", " )\n", " # f = gr.File(label=\"Upload many File\", file_count=\"multiple\")\n", " # # f.delete(delete_file)\n", " # f.delete(delete_file, inputs=None, outputs=None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# 
Gradio Demo: file_component_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def delete_file(n: int, file: gr.DeletedFileData):\n", " return [file.file.path, n + 1]\n", "\n", "with gr.Blocks() as demo:\n", "\n", " with gr.Row():\n", " with gr.Column():\n", " file_component = gr.File(label=\"Upload Single File\", file_count=\"single\")\n", " with gr.Column():\n", " output_file_1 = gr.File(\n", " label=\"Upload Single File Output\", file_count=\"single\"\n", " )\n", " num_load_btn_1 = gr.Number(label=\"# Load Upload Single File\", value=0)\n", " file_component.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component, num_load_btn_1],\n", " [output_file_1, num_load_btn_1],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_multiple = gr.File(\n", " label=\"Upload Multiple Files\", file_count=\"multiple\"\n", " )\n", " with gr.Column():\n", " output_file_2 = gr.File(\n", " label=\"Upload Multiple Files Output\", file_count=\"multiple\"\n", " )\n", " num_load_btn_2 = gr.Number(label=\"# Load Upload Multiple Files\", value=0)\n", " file_component_multiple.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_multiple, num_load_btn_2],\n", " [output_file_2, num_load_btn_2],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_specific = gr.File(\n", " label=\"Upload Multiple Files Image/Video\",\n", " file_count=\"multiple\",\n", " file_types=[\"image\", \"video\"],\n", " )\n", " with gr.Column():\n", " output_file_3 = gr.File(\n", " label=\"Upload Multiple Files Output Image/Video\", file_count=\"multiple\"\n", " )\n", " num_load_btn_3 = gr.Number(\n", " label=\"# Load Upload Multiple Files Image/Video\", value=0\n", " 
)\n", " file_component_specific.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_specific, num_load_btn_3],\n", " [output_file_3, num_load_btn_3],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_pdf = gr.File(label=\"Upload PDF File\", file_types=[\".pdf\"])\n", " with gr.Column():\n", " output_file_4 = gr.File(label=\"Upload PDF File Output\")\n", " num_load_btn_4 = gr.Number(label=\"# Load Upload PDF File\", value=0)\n", " file_component_pdf.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_pdf, num_load_btn_4],\n", " [output_file_4, num_load_btn_4],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " file_component_invalid = gr.File(\n", " label=\"Upload File with Invalid file_types\",\n", " file_types=[\"invalid file_type\"],\n", " )\n", " with gr.Column():\n", " output_file_5 = gr.File(label=\"Upload File with Invalid file_types Output\")\n", " num_load_btn_5 = gr.Number(\n", " label=\"# Load Upload File with Invalid file_types\", value=0\n", " )\n", " file_component_invalid.upload(\n", " lambda s, n: (s, n + 1),\n", " [file_component_invalid, num_load_btn_5],\n", " [output_file_5, num_load_btn_5],\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " del_file_input = gr.File(label=\"Delete File\", file_count=\"multiple\")\n", " with gr.Column():\n", " del_file_data = gr.Textbox(label=\"Delete file data\")\n", " num_load_btn_6 = gr.Number(label=\"# Deleted File\", value=0)\n", " del_file_input.delete(\n", " delete_file,\n", " [num_load_btn_6],\n", " [del_file_data, num_load_btn_6],\n", " )\n", " # f = gr.File(label=\"Upload many File\", file_count=\"multiple\")\n", " # # f.delete(delete_file)\n", " # f.delete(delete_file, inputs=None, outputs=None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/file_component_events/run.py b/demo/file_component_events/run.py index 
3f157dc2495a3..e1d593f7d2443 100644 --- a/demo/file_component_events/run.py +++ b/demo/file_component_events/run.py @@ -54,7 +54,7 @@ def delete_file(n: int, file: gr.DeletedFileData): ) with gr.Row(): with gr.Column(): - file_component_pdf = gr.File(label="Upload PDF File", file_types=["pdf"]) + file_component_pdf = gr.File(label="Upload PDF File", file_types=[".pdf"]) with gr.Column(): output_file_4 = gr.File(label="Upload PDF File Output") num_load_btn_4 = gr.Number(label="# Load Upload PDF File", value=0) diff --git a/demo/file_explorer_component_events/run.ipynb b/demo/file_explorer_component_events/run.ipynb index 5a6c915bbbf3c..bc81feb0bd465 100644 --- a/demo/file_explorer_component_events/run.ipynb +++ b/demo/file_explorer_component_events/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer_component_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('dir1')\n", "!wget -q -O dir1/bar.txt https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir1/bar.txt\n", "!wget -q -O dir1/foo.txt https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir1/foo.txt\n", "os.mkdir('dir2')\n", "!wget -q -O dir2/baz.png https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir2/baz.png\n", "!wget -q -O dir2/foo.png https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir2/foo.png\n", "os.mkdir('dir3')\n", "!wget -q -O dir3/dir3_bar.log 
https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir3/dir3_bar.log\n", "!wget -q -O dir3/dir3_foo.txt https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir3/dir3_foo.txt\n", "!wget -q -O dir3/dir4 https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir3/dir4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pathlib import Path\n", "\n", "base_root = Path(__file__).parent.resolve()\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " dd = gr.Dropdown(label=\"Select File Explorer Root\",\n", " value=str(base_root / \"dir1\"),\n", " choices=[str(base_root / \"dir1\"), str(base_root / \"dir2\"),\n", " str(base_root / \"dir3\")])\n", " with gr.Group():\n", " txt_only_glob = gr.Checkbox(label=\"Show only text files\", value=False)\n", " ignore_txt_in_glob = gr.Checkbox(label=\"Ignore text files in glob\", value=False)\n", "\n", " fe = gr.FileExplorer(root_dir=str(base_root / \"dir1\"),\n", " glob=\"**/*\", interactive=True)\n", " textbox = gr.Textbox(label=\"Selected Directory\")\n", " run = gr.Button(\"Run\")\n", " total_changes = gr.Number(0, elem_id=\"total-changes\")\n", "\n", " txt_only_glob.select(lambda s: gr.FileExplorer(glob=\"*.txt\" if s else \"*\") ,\n", " inputs=[txt_only_glob], outputs=[fe])\n", " ignore_txt_in_glob.select(lambda s: gr.FileExplorer(ignore_glob=\"*.txt\" if s else None),\n", " inputs=[ignore_txt_in_glob], outputs=[fe])\n", "\n", " dd.select(lambda s: gr.FileExplorer(root=s), inputs=[dd], outputs=[fe])\n", " run.click(lambda s: \",\".join(s) if isinstance(s, list) else s, inputs=[fe], outputs=[textbox])\n", " fe.change(lambda num: num + 1, inputs=total_changes, outputs=total_changes)\n", "\n", " with gr.Row():\n", " a = gr.Textbox(elem_id=\"input-box\")\n", " a.change(lambda x: x, inputs=[a])\n", "\n", "if __name__ == 
\"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer_component_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('dir1')\n", "!wget -q -O dir1/bar.txt https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir1/bar.txt\n", "!wget -q -O dir1/foo.txt https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir1/foo.txt\n", "os.mkdir('dir2')\n", "!wget -q -O dir2/baz.png https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir2/baz.png\n", "!wget -q -O dir2/foo.png https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir2/foo.png\n", "os.mkdir('dir3')\n", "!wget -q -O dir3/dir3_bar.log https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir3/dir3_bar.log\n", "!wget -q -O dir3/dir3_foo.txt https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir3/dir3_foo.txt\n", "!wget -q -O dir3/dir4 https://github.com/gradio-app/gradio/raw/main/demo/file_explorer_component_events/dir3/dir4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pathlib import Path\n", "\n", "base_root = Path(__file__).parent.resolve()\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " dd = gr.Dropdown(label=\"Select File Explorer Root\",\n", " value=str(base_root / 
\"dir1\"),\n", " choices=[str(base_root / \"dir1\"), str(base_root / \"dir2\"),\n", " str(base_root / \"dir3\")])\n", " with gr.Group():\n", " txt_only_glob = gr.Checkbox(label=\"Show only text files\", value=False)\n", " ignore_txt_in_glob = gr.Checkbox(label=\"Ignore text files in glob\", value=False)\n", "\n", " fe = gr.FileExplorer(root_dir=str(base_root / \"dir1\"),\n", " glob=\"**/*\", interactive=True)\n", " textbox = gr.Textbox(label=\"Selected Directory\")\n", " run = gr.Button(\"Run\")\n", " total_changes = gr.Number(0, elem_id=\"total-changes\")\n", "\n", " txt_only_glob.select(lambda s: gr.FileExplorer(glob=\"*.txt\" if s else \"*\") ,\n", " inputs=[txt_only_glob], outputs=[fe])\n", " ignore_txt_in_glob.select(lambda s: gr.FileExplorer(ignore_glob=\"*.txt\" if s else None),\n", " inputs=[ignore_txt_in_glob], outputs=[fe])\n", "\n", " dd.select(lambda s: gr.FileExplorer(root_dir=s), inputs=[dd], outputs=[fe])\n", " run.click(lambda s: \",\".join(s) if isinstance(s, list) else s, inputs=[fe], outputs=[textbox])\n", " fe.change(lambda num: num + 1, inputs=total_changes, outputs=total_changes)\n", "\n", " with gr.Row():\n", " a = gr.Textbox(elem_id=\"input-box\")\n", " a.change(lambda x: x, inputs=[a])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/file_explorer_component_events/run.py b/demo/file_explorer_component_events/run.py index d81b33a9e5de6..f8e5349a05598 100644 --- a/demo/file_explorer_component_events/run.py +++ b/demo/file_explorer_component_events/run.py @@ -24,7 +24,7 @@ ignore_txt_in_glob.select(lambda s: gr.FileExplorer(ignore_glob="*.txt" if s else None), inputs=[ignore_txt_in_glob], outputs=[fe]) - dd.select(lambda s: gr.FileExplorer(root=s), inputs=[dd], outputs=[fe]) + dd.select(lambda s: gr.FileExplorer(root_dir=s), inputs=[dd], outputs=[fe]) run.click(lambda s: ",".join(s) if isinstance(s, list) else s, inputs=[fe], 
outputs=[textbox]) fe.change(lambda num: num + 1, inputs=total_changes, outputs=total_changes) diff --git a/demo/gallery_component/files/cheetah.jpg b/demo/gallery_component/files/cheetah.jpg new file mode 100644 index 0000000000000..d1fde62d026ae Binary files /dev/null and b/demo/gallery_component/files/cheetah.jpg differ diff --git a/demo/gallery_component/files/world.mp4 b/demo/gallery_component/files/world.mp4 new file mode 100644 index 0000000000000..b11552f9cb693 Binary files /dev/null and b/demo/gallery_component/files/world.mp4 differ diff --git a/demo/gallery_component/run.ipynb b/demo/gallery_component/run.ipynb index 7b9f7d9ca1284..420c19627aa9d 100644 --- a/demo/gallery_component/run.ipynb +++ b/demo/gallery_component/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " cheetahs = [\n", " \"https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg\",\n", " \"https://img.etimg.com/thumb/msid-50159822,width-650,imgsize-129520,,resizemode-4,quality-100/.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-002.jpg\",\n", " \"https://images.theconversation.com/files/375893/original/file-20201218-13-a8h8uq.jpg?ixlib=rb-1.1.0&rect=16%2C407%2C5515%2C2924&q=45&auto=format&w=496&fit=clip\",\n", " ]\n", " gr.Gallery(value=cheetahs, columns=4)\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": 
[{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah.jpg https://github.com/gradio-app/gradio/raw/main/demo/gallery_component/files/cheetah.jpg\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/gallery_component/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " gallery_items = [\n", " (\"https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg\", \"cheetah1\"),\n", " (\"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg\", \"cheetah2\"),\n", " (\"https://videos.pexels.com/video-files/3209828/3209828-uhd_2560_1440_25fps.mp4\", \"world\"),\n", " (\"files/cheetah.jpg\", \"cheetah3\"),\n", " (\"files/world.mp4\", \"world2\")\n", " ]\n", " gr.Gallery(value=gallery_items, columns=4)\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/gallery_component/run.py b/demo/gallery_component/run.py index cd27c44d1896e..6187da2f3e486 100644 --- a/demo/gallery_component/run.py +++ b/demo/gallery_component/run.py @@ -1,13 +1,13 @@ import gradio as gr with gr.Blocks() as demo: - cheetahs = [ - "https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg", - "https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg", - 
"https://img.etimg.com/thumb/msid-50159822,width-650,imgsize-129520,,resizemode-4,quality-100/.jpg", - "https://nationalzoo.si.edu/sites/default/files/animals/cheetah-002.jpg", - "https://images.theconversation.com/files/375893/original/file-20201218-13-a8h8uq.jpg?ixlib=rb-1.1.0&rect=16%2C407%2C5515%2C2924&q=45&auto=format&w=496&fit=clip", + gallery_items = [ + ("https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg", "cheetah1"), + ("https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg", "cheetah2"), + ("https://videos.pexels.com/video-files/3209828/3209828-uhd_2560_1440_25fps.mp4", "world"), + ("files/cheetah.jpg", "cheetah3"), + ("files/world.mp4", "world2") ] - gr.Gallery(value=cheetahs, columns=4) + gr.Gallery(value=gallery_items, columns=4) demo.launch() diff --git a/demo/gallery_component_events/run.ipynb b/demo/gallery_component_events/run.ipynb index 0b5bdf13c1aac..24e59d4c0794e 100644 --- a/demo/gallery_component_events/run.ipynb +++ b/demo/gallery_component_events/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_component_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " cheetahs = [\n", " \"https://gradio-builds.s3.amazonaws.com/assets/cheetah-003.jpg\",\n", " \"https://gradio-builds.s3.amazonaws.com/assets/lite-logo.png\",\n", " \"https://gradio-builds.s3.amazonaws.com/assets/TheCheethcat.jpg\",\n", " ]\n", " with gr.Row():\n", " with gr.Column():\n", " gal = gr.Gallery(columns=4, interactive=True, label=\"Input Gallery\")\n", " btn = gr.Button()\n", " with gr.Column():\n", " output_gal 
= gr.Gallery(columns=4, interactive=True, label=\"Output Gallery\")\n", " with gr.Row():\n", " textbox = gr.Json(label=\"uploaded files\")\n", " num_upload = gr.Number(value=0, label=\"Num Upload\")\n", " num_change = gr.Number(value=0, label=\"Num Change\")\n", " select_output = gr.Textbox(label=\"Select Data\")\n", " gal.upload(lambda v,n: (v, v, n+1), [gal, num_upload], [textbox, output_gal, num_upload])\n", " gal.change(lambda v,n: (v, v, n+1), [gal, num_change], [textbox, output_gal, num_change])\n", "\n", " btn.click(lambda: cheetahs, None, [output_gal])\n", "\n", " def select(select_data: gr.SelectData):\n", " return select_data.value['image']['url']\n", "\n", " output_gal.select(select, None, select_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_component_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " files = [\n", " \"https://gradio-builds.s3.amazonaws.com/assets/cheetah-003.jpg\",\n", " \"https://gradio-static-files.s3.amazonaws.com/world.mp4\",\n", " \"https://gradio-builds.s3.amazonaws.com/assets/TheCheethcat.jpg\",\n", " ]\n", " with gr.Row():\n", " with gr.Column():\n", " gal = gr.Gallery(columns=4, interactive=True, label=\"Input Gallery\")\n", " btn = gr.Button()\n", " with gr.Column():\n", " output_gal = gr.Gallery(columns=4, interactive=True, label=\"Output Gallery\")\n", " with gr.Row():\n", " textbox = gr.Json(label=\"uploaded files\")\n", " num_upload = gr.Number(value=0, 
label=\"Num Upload\")\n", " num_change = gr.Number(value=0, label=\"Num Change\")\n", " select_output = gr.Textbox(label=\"Select Data\")\n", " gal.upload(lambda v,n: (v, v, n+1), [gal, num_upload], [textbox, output_gal, num_upload])\n", " gal.change(lambda v,n: (v, v, n+1), [gal, num_change], [textbox, output_gal, num_change])\n", "\n", " btn.click(lambda: files, None, [output_gal])\n", "\n", " def select(select_data: gr.SelectData):\n", " return select_data.value['image']['url'] if 'image' in select_data.value else select_data.value['video']['url']\n", "\n", " output_gal.select(select, None, select_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/gallery_component_events/run.py b/demo/gallery_component_events/run.py index efbcf0ab03b5e..fa9f9cbf33e9f 100644 --- a/demo/gallery_component_events/run.py +++ b/demo/gallery_component_events/run.py @@ -1,9 +1,9 @@ import gradio as gr with gr.Blocks() as demo: - cheetahs = [ + files = [ "https://gradio-builds.s3.amazonaws.com/assets/cheetah-003.jpg", - "https://gradio-builds.s3.amazonaws.com/assets/lite-logo.png", + "https://gradio-static-files.s3.amazonaws.com/world.mp4", "https://gradio-builds.s3.amazonaws.com/assets/TheCheethcat.jpg", ] with gr.Row(): @@ -20,10 +20,10 @@ gal.upload(lambda v,n: (v, v, n+1), [gal, num_upload], [textbox, output_gal, num_upload]) gal.change(lambda v,n: (v, v, n+1), [gal, num_change], [textbox, output_gal, num_change]) - btn.click(lambda: cheetahs, None, [output_gal]) + btn.click(lambda: files, None, [output_gal]) def select(select_data: gr.SelectData): - return select_data.value['image']['url'] + return select_data.value['image']['url'] if 'image' in select_data.value else select_data.value['video']['url'] output_gal.select(select, None, select_output) diff --git a/demo/gallery_selections/requirements.txt b/demo/gallery_selections/requirements.txt new file mode 
100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/gallery_selections/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/gallery_selections/run.ipynb b/demo/gallery_selections/run.ipynb index a07e4e5fa362f..345818c2f26c8 100644 --- a/demo/gallery_selections/run.ipynb +++ b/demo/gallery_selections/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " imgs = gr.State()\n", " gallery = gr.Gallery(allow_preview=False)\n", "\n", " def deselect_images():\n", " return gr.Gallery(selected_index=None)\n", "\n", " def generate_images():\n", " images = []\n", " for _ in range(9):\n", " image = np.ones((100, 100, 3), dtype=np.uint8) * np.random.randint(\n", " 0, 255, 3\n", " ) # image is a solid single color\n", " images.append(image)\n", " return images, images\n", "\n", " demo.load(generate_images, None, [gallery, imgs])\n", "\n", " with gr.Row():\n", " selected = gr.Number(show_label=False)\n", " darken_btn = gr.Button(\"Darken selected\")\n", " deselect_button = gr.Button(\"Deselect\")\n", "\n", " deselect_button.click(deselect_images, None, gallery)\n", "\n", " def get_select_index(evt: gr.SelectData):\n", " return evt.index\n", "\n", " gallery.select(get_select_index, None, selected)\n", "\n", " def darken_img(imgs, index):\n", " index = int(index)\n", " imgs[index] = np.round(imgs[index] * 0.8).astype(np.uint8)\n", " return imgs, imgs\n", "\n", " darken_btn.click(darken_img, [imgs, selected], [imgs, gallery])\n", "\n", "if __name__ == 
\"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " imgs = gr.State()\n", " gallery = gr.Gallery(allow_preview=False)\n", "\n", " def deselect_images():\n", " return gr.Gallery(selected_index=None)\n", "\n", " def generate_images():\n", " images = []\n", " for _ in range(9):\n", " image = np.ones((100, 100, 3), dtype=np.uint8) * np.random.randint(\n", " 0, 255, 3\n", " ) # image is a solid single color\n", " images.append(image)\n", " return images, images\n", "\n", " demo.load(generate_images, None, [gallery, imgs])\n", "\n", " with gr.Row():\n", " selected = gr.Number(show_label=False)\n", " darken_btn = gr.Button(\"Darken selected\")\n", " deselect_button = gr.Button(\"Deselect\")\n", "\n", " deselect_button.click(deselect_images, None, gallery)\n", "\n", " def get_select_index(evt: gr.SelectData):\n", " return evt.index\n", "\n", " gallery.select(get_select_index, None, selected)\n", "\n", " def darken_img(imgs, index):\n", " index = int(index)\n", " imgs[index] = np.round(imgs[index] * 0.8).astype(np.uint8)\n", " return imgs, imgs\n", "\n", " darken_btn.click(darken_img, [imgs, selected], [imgs, gallery])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/hello_blocks/run.ipynb b/demo/hello_blocks/run.ipynb index 
9d509b14150e0..04ab5cb198579 100644 --- a/demo/hello_blocks/run.ipynb +++ b/demo/hello_blocks/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_blocks"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", " greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_blocks"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", " greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff 
--git a/demo/hello_blocks/run.py b/demo/hello_blocks/run.py index bfb65715686ac..f11bca19f42a7 100644 --- a/demo/hello_blocks/run.py +++ b/demo/hello_blocks/run.py @@ -1,8 +1,10 @@ import gradio as gr + def greet(name): return "Hello " + name + "!" + with gr.Blocks() as demo: name = gr.Textbox(label="Name") output = gr.Textbox(label="Output Box") diff --git a/demo/hello_world/run.ipynb b/demo/hello_world/run.ipynb index ddfe0d41accb4..c6614e5d3a57b 100644 --- a/demo/hello_world/run.ipynb +++ b/demo/hello_world/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_world\n", "### The simplest possible Gradio demo. It wraps a 'Hello {name}!' function in an Interface that accepts and returns text.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "demo = gr.Interface(fn=greet, inputs=\"textbox\", outputs=\"textbox\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_world\n", "### The simplest possible Gradio demo. It wraps a 'Hello {name}!' 
function in an Interface that accepts and returns text.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "\n", "demo = gr.Interface(fn=greet, inputs=\"textbox\", outputs=\"textbox\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/hello_world/run.py b/demo/hello_world/run.py index 15e7cb988b942..8179c16a51dee 100644 --- a/demo/hello_world/run.py +++ b/demo/hello_world/run.py @@ -1,8 +1,10 @@ import gradio as gr + def greet(name): return "Hello " + name + "!" + demo = gr.Interface(fn=greet, inputs="textbox", outputs="textbox") if __name__ == "__main__": diff --git a/demo/hello_world_4/screenshot.gif b/demo/hello_world_4/screenshot.gif index 32c4fd62f4dfb..eb7e3352b4868 100644 Binary files a/demo/hello_world_4/screenshot.gif and b/demo/hello_world_4/screenshot.gif differ diff --git a/demo/image_classification/requirements.txt b/demo/image_classification/requirements.txt index 37f700a78ecad..5ec204513fa11 100644 --- a/demo/image_classification/requirements.txt +++ b/demo/image_classification/requirements.txt @@ -1,2 +1,3 @@ torch -torchvision \ No newline at end of file +torchvision +requests diff --git a/demo/image_classification/run.ipynb b/demo/image_classification/run.ipynb index c3711bde42867..d2e91908bb3d0 100644 --- a/demo/image_classification/run.ipynb +++ b/demo/image_classification/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classification\n", "### Simple image 
classification in Pytorch with Gradio's Image input and Label output.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/image_classification/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "import requests\n", "from torchvision import transforms\n", "\n", "model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def predict(inp):\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n", " return confidences\n", "\n", "demo = gr.Interface(fn=predict,\n", " inputs=gr.Image(type=\"pil\"),\n", " outputs=gr.Label(num_top_classes=3),\n", " examples=[[\"cheetah.jpg\"]],\n", " )\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classification\n", "### Simple image classification in Pytorch with Gradio's Image input and Label output.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision requests "]}, 
{"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/image_classification/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "import requests\n", "from torchvision import transforms\n", "\n", "model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def predict(inp):\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n", " return confidences\n", "\n", "demo = gr.Interface(fn=predict,\n", " inputs=gr.Image(type=\"pil\"),\n", " outputs=gr.Label(num_top_classes=3),\n", " examples=[[\"cheetah.jpg\"]],\n", " )\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/image_classifier/requirements.txt b/demo/image_classifier/requirements.txt index c2bbfa1f2b3c7..7947059c17d1e 100644 --- a/demo/image_classifier/requirements.txt +++ b/demo/image_classifier/requirements.txt @@ -1,2 +1,3 @@ numpy -tensorflow \ No newline at end of file +tensorflow +requests diff --git a/demo/image_classifier/run.ipynb b/demo/image_classifier/run.ipynb index b20982b9d799c..10e0f1b812c4f 100644 --- a/demo/image_classifier/run.ipynb +++ b/demo/image_classifier/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier"]}, {"cell_type": "code", "execution_count": null, 
"id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/files/imagenet_labels.json\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/cheetah1.jpg\n", "!wget -q -O images/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import requests\n", "import tensorflow as tf\n", "\n", "import gradio as gr\n", "\n", "inception_net = tf.keras.applications.MobileNetV2() # load the model\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def classify_image(inp):\n", " inp = inp.reshape((-1, 224, 224, 3))\n", " inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n", " prediction = inception_net.predict(inp).flatten()\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "image = gr.Image()\n", "label = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(\n", " fn=classify_image,\n", " inputs=image,\n", " outputs=label,\n", " examples=[\n", " os.path.join(os.path.abspath(''), \"images/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"images/lion.jpg\")\n", " ]\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": 
[{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy tensorflow requests "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/files/imagenet_labels.json\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/cheetah1.jpg\n", "!wget -q -O images/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import requests\n", "import tensorflow as tf\n", "\n", "import gradio as gr\n", "\n", "inception_net = tf.keras.applications.MobileNetV2() # load the model\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def classify_image(inp):\n", " inp = inp.reshape((-1, 224, 224, 3))\n", " inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n", " prediction = inception_net.predict(inp).flatten()\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "image = gr.Image()\n", "label = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(\n", " fn=classify_image,\n", " inputs=image,\n", " outputs=label,\n", " examples=[\n", " os.path.join(os.path.abspath(''), \"images/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), 
\"images/lion.jpg\")\n", " ]\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/image_classifier_2/requirements.txt b/demo/image_classifier_2/requirements.txt index 2ab187b1e3517..0396385ef5137 100644 --- a/demo/image_classifier_2/requirements.txt +++ b/demo/image_classifier_2/requirements.txt @@ -1,3 +1,4 @@ pillow torch -torchvision \ No newline at end of file +torchvision +requests diff --git a/demo/image_classifier_2/run.ipynb b/demo/image_classifier_2/run.ipynb index 980e09519df10..b87f6a86592f3 100644 --- a/demo/image_classifier_2/run.ipynb +++ b/demo/image_classifier_2/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier_2"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pillow torch torchvision"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier_2/files/imagenet_labels.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import requests\n", "import torch\n", "from PIL import Image\n", "from torchvision import transforms\n", "\n", "import gradio as gr\n", "\n", "model = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def predict(inp):\n", " 
inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "inputs = gr.Image()\n", "outputs = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(fn=predict, inputs=inputs, outputs=outputs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier_2"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pillow torch torchvision requests "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier_2/files/imagenet_labels.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import requests\n", "import torch\n", "from PIL import Image\n", "from torchvision import transforms\n", "\n", "import gradio as gr\n", "\n", "model = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def predict(inp):\n", " inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = 
torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "inputs = gr.Image()\n", "outputs = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(fn=predict, inputs=inputs, outputs=outputs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/image_segmentation/requirements.txt b/demo/image_segmentation/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/image_segmentation/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/image_segmentation/run.ipynb b/demo/image_segmentation/run.ipynb index ac39c2862e509..a4406e9d0af3c 100644 --- a/demo/image_segmentation/run.ipynb +++ b/demo/image_segmentation/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_segmentation\n", "### Simple image segmentation using gradio's AnnotatedImage component.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import random\n", "\n", "with gr.Blocks() as demo:\n", " section_labels = [\n", " \"apple\",\n", " \"banana\",\n", " \"carrot\",\n", " \"donut\",\n", " \"eggplant\",\n", " \"fish\",\n", " \"grapes\",\n", " \"hamburger\",\n", " \"ice cream\",\n", " \"juice\",\n", " ]\n", "\n", " with gr.Row():\n", " num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n", " num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n", "\n", " with gr.Row():\n", " img_input = gr.Image()\n", " img_output = gr.AnnotatedImage(\n", 
" color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n", " )\n", "\n", " section_btn = gr.Button(\"Identify Sections\")\n", " selected_section = gr.Textbox(label=\"Selected Section\")\n", "\n", " def section(img, num_boxes, num_segments):\n", " sections = []\n", " for a in range(num_boxes):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " w = random.randint(0, img.shape[1] - x)\n", " h = random.randint(0, img.shape[0] - y)\n", " sections.append(((x, y, x + w, y + h), section_labels[a]))\n", " for b in range(num_segments):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n", " mask = np.zeros(img.shape[:2])\n", " for i in range(img.shape[0]):\n", " for j in range(img.shape[1]):\n", " dist_square = (i - y) ** 2 + (j - x) ** 2\n", " if dist_square < r**2:\n", " mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n", " sections.append((mask, section_labels[b + num_boxes]))\n", " return (img, sections)\n", "\n", " section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n", "\n", " def select_section(evt: gr.SelectData):\n", " return section_labels[evt.index]\n", "\n", " img_output.select(select_section, None, selected_section)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_segmentation\n", "### Simple image segmentation using gradio's AnnotatedImage component.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, 
"outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import random\n", "\n", "with gr.Blocks() as demo:\n", " section_labels = [\n", " \"apple\",\n", " \"banana\",\n", " \"carrot\",\n", " \"donut\",\n", " \"eggplant\",\n", " \"fish\",\n", " \"grapes\",\n", " \"hamburger\",\n", " \"ice cream\",\n", " \"juice\",\n", " ]\n", "\n", " with gr.Row():\n", " num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n", " num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n", "\n", " with gr.Row():\n", " img_input = gr.Image()\n", " img_output = gr.AnnotatedImage(\n", " color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n", " )\n", "\n", " section_btn = gr.Button(\"Identify Sections\")\n", " selected_section = gr.Textbox(label=\"Selected Section\")\n", "\n", " def section(img, num_boxes, num_segments):\n", " sections = []\n", " for a in range(num_boxes):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " w = random.randint(0, img.shape[1] - x)\n", " h = random.randint(0, img.shape[0] - y)\n", " sections.append(((x, y, x + w, y + h), section_labels[a]))\n", " for b in range(num_segments):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n", " mask = np.zeros(img.shape[:2])\n", " for i in range(img.shape[0]):\n", " for j in range(img.shape[1]):\n", " dist_square = (i - y) ** 2 + (j - x) ** 2\n", " if dist_square < r**2:\n", " mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n", " sections.append((mask, section_labels[b + num_boxes]))\n", " return (img, sections)\n", "\n", " section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n", "\n", " def select_section(evt: gr.SelectData):\n", " return section_labels[evt.index]\n", "\n", " img_output.select(select_section, None, selected_section)\n", "\n", "if __name__ == \"__main__\":\n", " 
demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/image_selections/requirements.txt b/demo/image_selections/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/image_selections/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/image_selections/run.ipynb b/demo/image_selections/run.ipynb index 5a5ee6a911946..236eea722abcd 100644 --- a/demo/image_selections/run.ipynb +++ b/demo/image_selections/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " tolerance = gr.Slider(label=\"Tolerance\", info=\"How different colors can be in a segment.\", minimum=0, maximum=256*3, value=50)\n", " with gr.Row():\n", " input_img = gr.Image(label=\"Input\")\n", " output_img = gr.Image(label=\"Selected Segment\")\n", "\n", " def get_select_coords(img, tolerance, evt: gr.SelectData):\n", " visited_pixels = set()\n", " pixels_in_queue = set()\n", " pixels_in_segment = set()\n", " start_pixel = img[evt.index[1], evt.index[0]]\n", " pixels_in_queue.add((evt.index[1], evt.index[0]))\n", " while len(pixels_in_queue) > 0:\n", " pixel = pixels_in_queue.pop()\n", " visited_pixels.add(pixel)\n", " neighbors = []\n", " if pixel[0] > 0:\n", " neighbors.append((pixel[0] - 1, pixel[1]))\n", " if pixel[0] < img.shape[0] - 1:\n", " neighbors.append((pixel[0] + 1, pixel[1]))\n", " if pixel[1] > 0:\n", " neighbors.append((pixel[0], pixel[1] - 1))\n", " if pixel[1] < img.shape[1] - 
1:\n", " neighbors.append((pixel[0], pixel[1] + 1))\n", " for neighbor in neighbors:\n", " if neighbor in visited_pixels:\n", " continue\n", " neighbor_pixel = img[neighbor[0], neighbor[1]]\n", " if np.abs(neighbor_pixel - start_pixel).sum() < tolerance:\n", " pixels_in_queue.add(neighbor)\n", " pixels_in_segment.add(neighbor)\n", "\n", " out = img.copy() * 0.2\n", " out = out.astype(np.uint8)\n", " for pixel in pixels_in_segment:\n", " out[pixel[0], pixel[1]] = img[pixel[0], pixel[1]]\n", " return out\n", "\n", " input_img.select(get_select_coords, [input_img, tolerance], output_img)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " tolerance = gr.Slider(label=\"Tolerance\", info=\"How different colors can be in a segment.\", minimum=0, maximum=256*3, value=50)\n", " with gr.Row():\n", " input_img = gr.Image(label=\"Input\")\n", " output_img = gr.Image(label=\"Selected Segment\")\n", "\n", " def get_select_coords(img, tolerance, evt: gr.SelectData):\n", " visited_pixels = set()\n", " pixels_in_queue = set()\n", " pixels_in_segment = set()\n", " start_pixel = img[evt.index[1], evt.index[0]]\n", " pixels_in_queue.add((evt.index[1], evt.index[0]))\n", " while len(pixels_in_queue) > 0:\n", " pixel = pixels_in_queue.pop()\n", " visited_pixels.add(pixel)\n", " neighbors = []\n", " if pixel[0] > 0:\n", " neighbors.append((pixel[0] - 1, 
pixel[1]))\n", " if pixel[0] < img.shape[0] - 1:\n", " neighbors.append((pixel[0] + 1, pixel[1]))\n", " if pixel[1] > 0:\n", " neighbors.append((pixel[0], pixel[1] - 1))\n", " if pixel[1] < img.shape[1] - 1:\n", " neighbors.append((pixel[0], pixel[1] + 1))\n", " for neighbor in neighbors:\n", " if neighbor in visited_pixels:\n", " continue\n", " neighbor_pixel = img[neighbor[0], neighbor[1]]\n", " if np.abs(neighbor_pixel - start_pixel).sum() < tolerance:\n", " pixels_in_queue.add(neighbor)\n", " pixels_in_segment.add(neighbor)\n", "\n", " out = img.copy() * 0.2\n", " out = out.astype(np.uint8)\n", " for pixel in pixels_in_segment:\n", " out[pixel[0], pixel[1]] = img[pixel[0], pixel[1]]\n", " return out\n", "\n", " input_img.select(get_select_coords, [input_img, tolerance], output_img)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/json_component/requirements.txt b/demo/json_component/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/json_component/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/json_component/run.ipynb b/demo/json_component/run.ipynb index bd2644a43d7f2..7f38e294f4690 100644 --- a/demo/json_component/run.ipynb +++ b/demo/json_component/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: json_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " inp = gr.JSON(\n", " label=\"InputJSON\",\n", " value={\n", " \"Key 1\": \"Value 1\",\n", 
" \"Key 2\": {\"Key 3\": \"Value 2\", \"Key 4\": \"Value 3\"},\n", " \"Key 5\": [\"Item 1\", \"Item 2\", \"Item 3\"],\n", " \"Key 6\": 123,\n", " \"Key 7\": 123.456,\n", " \"Key 8\": True,\n", " \"Key 9\": False,\n", " \"Key 10\": None,\n", " \"Key 11\": np.array([1, 2, 3]),\n", " }\n", " )\n", " out = gr.JSON(label=\"OutputJSON\")\n", " btn = gr.Button(\"Submit\")\n", " btn.click(lambda x: x, inp, out)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: json_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " inp = gr.JSON(\n", " label=\"InputJSON\",\n", " value={\n", " \"Key 1\": \"Value 1\",\n", " \"Key 2\": {\"Key 3\": \"Value 2\", \"Key 4\": \"Value 3\"},\n", " \"Key 5\": [\"Item 1\", \"Item 2\", \"Item 3\"],\n", " \"Key 6\": 123,\n", " \"Key 7\": 123.456,\n", " \"Key 8\": True,\n", " \"Key 9\": False,\n", " \"Key 10\": None,\n", " \"Key 11\": np.array([1, 2, 3]),\n", " }\n", " )\n", " out = gr.JSON(label=\"OutputJSON\")\n", " btn = gr.Button(\"Submit\")\n", " btn.click(lambda x: x, inp, out)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/kitchen_sink/requirements.txt b/demo/kitchen_sink/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/kitchen_sink/requirements.txt @@ -0,0 +1 @@ +numpy diff --git 
a/demo/kitchen_sink/run.ipynb b/demo/kitchen_sink/run.ipynb index cb1313650adc6..68f1e9bf727b6 100644 --- a/demo/kitchen_sink/run.ipynb +++ b/demo/kitchen_sink/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: kitchen_sink"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cantina.wav\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cheetah1.jpg\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/lion.jpg\n", "!wget -q -O files/logo.png https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/logo.png\n", "!wget -q -O files/time.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/time.csv\n", "!wget -q -O files/titanic.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/titanic.csv\n", "!wget -q -O files/tower.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/tower.jpg\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import json\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "CHOICES = [\"foo\", \"bar\", \"baz\"]\n", "JSONOBJ = \"\"\"{\"items\":{\"item\":[{\"id\": \"0001\",\"type\": null,\"is_good\": 
false,\"ppu\": 0.55,\"batters\":{\"batter\":[{ \"id\": \"1001\", \"type\": \"Regular\" },{ \"id\": \"1002\", \"type\": \"Chocolate\" },{ \"id\": \"1003\", \"type\": \"Blueberry\" },{ \"id\": \"1004\", \"type\": \"Devil's Food\" }]},\"topping\":[{ \"id\": \"5001\", \"type\": \"None\" },{ \"id\": \"5002\", \"type\": \"Glazed\" },{ \"id\": \"5005\", \"type\": \"Sugar\" },{ \"id\": \"5007\", \"type\": \"Powdered Sugar\" },{ \"id\": \"5006\", \"type\": \"Chocolate with Sprinkles\" },{ \"id\": \"5003\", \"type\": \"Chocolate\" },{ \"id\": \"5004\", \"type\": \"Maple\" }]}]}}\"\"\"\n", "\n", "def fn(\n", " text1,\n", " text2,\n", " num,\n", " slider1,\n", " slider2,\n", " single_checkbox,\n", " checkboxes,\n", " radio,\n", " dropdown,\n", " multi_dropdown,\n", " im1,\n", " # im2,\n", " # im3,\n", " im4,\n", " video,\n", " audio1,\n", " audio2,\n", " file,\n", " df1,\n", "):\n", " return (\n", " (text1 if single_checkbox else text2)\n", " + \", selected:\"\n", " + \", \".join(checkboxes), # Text\n", " {\n", " \"positive\": num / (num + slider1 + slider2),\n", " \"negative\": slider1 / (num + slider1 + slider2),\n", " \"neutral\": slider2 / (num + slider1 + slider2),\n", " }, # Label\n", " (audio1[0], np.flipud(audio1[1]))\n", " if audio1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cantina.wav\"), # Audio\n", " np.flipud(im1)\n", " if im1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"), # Image\n", " video\n", " if video is not None\n", " else os.path.join(os.path.abspath(''), \"files/world.mp4\"), # Video\n", " [\n", " (\"The\", \"art\"),\n", " (\"quick brown\", \"adj\"),\n", " (\"fox\", \"nn\"),\n", " (\"jumped\", \"vrb\"),\n", " (\"testing testing testing\", None),\n", " (\"over\", \"prp\"),\n", " (\"the\", \"art\"),\n", " (\"testing\", None),\n", " (\"lazy\", \"adj\"),\n", " (\"dogs\", \"nn\"),\n", " (\".\", \"punc\"),\n", " ]\n", " + [(f\"test {x}\", f\"test {x}\") for x in range(10)], # HighlightedText\n", " # 
[(\"The testing testing testing\", None), (\"quick brown\", 0.2), (\"fox\", 1), (\"jumped\", -1), (\"testing testing testing\", 0), (\"over\", 0), (\"the\", 0), (\"testing\", 0), (\"lazy\", 1), (\"dogs\", 0), (\".\", 1)] + [(f\"test {x}\", x/10) for x in range(-10, 10)], # HighlightedText\n", " [\n", " (\"The testing testing testing\", None),\n", " (\"over\", 0.6),\n", " (\"the\", 0.2),\n", " (\"testing\", None),\n", " (\"lazy\", -0.1),\n", " (\"dogs\", 0.4),\n", " (\".\", 0),\n", " ]\n", " + [(\"test\", x / 10) for x in range(-10, 10)], # HighlightedText\n", " json.loads(JSONOBJ), # JSON\n", " \"\", # HTML\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " df1, # Dataframe\n", " np.random.randint(0, 10, (4, 4)), # Dataframe\n", " )\n", "\n", "demo = gr.Interface(\n", " fn,\n", " inputs=[\n", " gr.Textbox(value=\"Lorem ipsum\", label=\"Textbox\"),\n", " gr.Textbox(lines=3, placeholder=\"Type here..\", label=\"Textbox 2\"),\n", " gr.Number(label=\"Number\", value=42),\n", " gr.Slider(10, 20, value=15, label=\"Slider: 10 - 20\"),\n", " gr.Slider(maximum=20, step=0.04, label=\"Slider: step @ 0.04\"),\n", " gr.Checkbox(label=\"Checkbox\"),\n", " gr.CheckboxGroup(label=\"CheckboxGroup\", choices=CHOICES, value=CHOICES[0:2]),\n", " gr.Radio(label=\"Radio\", choices=CHOICES, value=CHOICES[2]),\n", " gr.Dropdown(label=\"Dropdown\", choices=CHOICES),\n", " gr.Dropdown(\n", " label=\"Multiselect Dropdown (Max choice: 2)\",\n", " choices=CHOICES,\n", " multiselect=True,\n", " max_choices=2,\n", " ),\n", " gr.Image(label=\"Image\"),\n", " # gr.Image(label=\"Image w/ Cropper\", tool=\"select\"),\n", " # gr.Image(label=\"Sketchpad\", source=\"canvas\"),\n", " gr.Image(label=\"Webcam\", sources=[\"webcam\"]),\n", " gr.Video(label=\"Video\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Audio(label=\"Microphone\", sources=[\"microphone\"]),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\", headers=[\"Name\", \"Age\", \"Gender\"]),\n", " ],\n", 
" outputs=[\n", " gr.Textbox(label=\"Textbox\"),\n", " gr.Label(label=\"Label\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Image(label=\"Image\"),\n", " gr.Video(label=\"Video\"),\n", " gr.HighlightedText(\n", " label=\"HighlightedText\", color_map={\"punc\": \"pink\", \"test 0\": \"blue\"}\n", " ),\n", " gr.HighlightedText(label=\"HighlightedText\", show_legend=True),\n", " gr.JSON(label=\"JSON\"),\n", " gr.HTML(label=\"HTML\"),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\"),\n", " gr.Dataframe(label=\"Numpy\"),\n", " ],\n", " examples=[\n", " [\n", " \"the quick brown fox\",\n", " \"jumps over the lazy dog\",\n", " 10,\n", " 12,\n", " 4,\n", " True,\n", " [\"foo\", \"baz\"],\n", " \"baz\",\n", " \"bar\",\n", " [\"foo\", \"bar\"],\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " # os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " # os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/world.mp4\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]],\n", " ]\n", " ]\n", " * 3,\n", " title=\"Kitchen Sink\",\n", " description=\"Try out all the components!\",\n", " article=\"Learn more about [Gradio](http://gradio.app)\",\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: kitchen_sink"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip 
install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cantina.wav\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cheetah1.jpg\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/lion.jpg\n", "!wget -q -O files/logo.png https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/logo.png\n", "!wget -q -O files/time.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/time.csv\n", "!wget -q -O files/titanic.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/titanic.csv\n", "!wget -q -O files/tower.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/tower.jpg\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import json\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "CHOICES = [\"foo\", \"bar\", \"baz\"]\n", "JSONOBJ = \"\"\"{\"items\":{\"item\":[{\"id\": \"0001\",\"type\": null,\"is_good\": false,\"ppu\": 0.55,\"batters\":{\"batter\":[{ \"id\": \"1001\", \"type\": \"Regular\" },{ \"id\": \"1002\", \"type\": \"Chocolate\" },{ \"id\": \"1003\", \"type\": \"Blueberry\" },{ \"id\": \"1004\", \"type\": \"Devil's Food\" }]},\"topping\":[{ \"id\": \"5001\", \"type\": \"None\" },{ \"id\": \"5002\", \"type\": \"Glazed\" },{ \"id\": \"5005\", \"type\": \"Sugar\" },{ \"id\": \"5007\", \"type\": \"Powdered Sugar\" },{ \"id\": \"5006\", \"type\": \"Chocolate with 
Sprinkles\" },{ \"id\": \"5003\", \"type\": \"Chocolate\" },{ \"id\": \"5004\", \"type\": \"Maple\" }]}]}}\"\"\"\n", "\n", "def fn(\n", " text1,\n", " text2,\n", " num,\n", " slider1,\n", " slider2,\n", " single_checkbox,\n", " checkboxes,\n", " radio,\n", " dropdown,\n", " multi_dropdown,\n", " im1,\n", " # im2,\n", " # im3,\n", " im4,\n", " video,\n", " audio1,\n", " audio2,\n", " file,\n", " df1,\n", "):\n", " return (\n", " (text1 if single_checkbox else text2)\n", " + \", selected:\"\n", " + \", \".join(checkboxes), # Text\n", " {\n", " \"positive\": num / (num + slider1 + slider2),\n", " \"negative\": slider1 / (num + slider1 + slider2),\n", " \"neutral\": slider2 / (num + slider1 + slider2),\n", " }, # Label\n", " (audio1[0], np.flipud(audio1[1]))\n", " if audio1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cantina.wav\"), # Audio\n", " np.flipud(im1)\n", " if im1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"), # Image\n", " video\n", " if video is not None\n", " else os.path.join(os.path.abspath(''), \"files/world.mp4\"), # Video\n", " [\n", " (\"The\", \"art\"),\n", " (\"quick brown\", \"adj\"),\n", " (\"fox\", \"nn\"),\n", " (\"jumped\", \"vrb\"),\n", " (\"testing testing testing\", None),\n", " (\"over\", \"prp\"),\n", " (\"the\", \"art\"),\n", " (\"testing\", None),\n", " (\"lazy\", \"adj\"),\n", " (\"dogs\", \"nn\"),\n", " (\".\", \"punc\"),\n", " ]\n", " + [(f\"test {x}\", f\"test {x}\") for x in range(10)], # HighlightedText\n", " # [(\"The testing testing testing\", None), (\"quick brown\", 0.2), (\"fox\", 1), (\"jumped\", -1), (\"testing testing testing\", 0), (\"over\", 0), (\"the\", 0), (\"testing\", 0), (\"lazy\", 1), (\"dogs\", 0), (\".\", 1)] + [(f\"test {x}\", x/10) for x in range(-10, 10)], # HighlightedText\n", " [\n", " (\"The testing testing testing\", None),\n", " (\"over\", 0.6),\n", " (\"the\", 0.2),\n", " (\"testing\", None),\n", " (\"lazy\", -0.1),\n", " (\"dogs\", 0.4),\n", " 
(\".\", 0),\n", " ]\n", " + [(\"test\", x / 10) for x in range(-10, 10)], # HighlightedText\n", " json.loads(JSONOBJ), # JSON\n", " \"\", # HTML\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " df1, # Dataframe\n", " np.random.randint(0, 10, (4, 4)), # Dataframe\n", " )\n", "\n", "demo = gr.Interface(\n", " fn,\n", " inputs=[\n", " gr.Textbox(value=\"Lorem ipsum\", label=\"Textbox\"),\n", " gr.Textbox(lines=3, placeholder=\"Type here..\", label=\"Textbox 2\"),\n", " gr.Number(label=\"Number\", value=42),\n", " gr.Slider(10, 20, value=15, label=\"Slider: 10 - 20\"),\n", " gr.Slider(maximum=20, step=0.04, label=\"Slider: step @ 0.04\"),\n", " gr.Checkbox(label=\"Checkbox\"),\n", " gr.CheckboxGroup(label=\"CheckboxGroup\", choices=CHOICES, value=CHOICES[0:2]),\n", " gr.Radio(label=\"Radio\", choices=CHOICES, value=CHOICES[2]),\n", " gr.Dropdown(label=\"Dropdown\", choices=CHOICES),\n", " gr.Dropdown(\n", " label=\"Multiselect Dropdown (Max choice: 2)\",\n", " choices=CHOICES,\n", " multiselect=True,\n", " max_choices=2,\n", " ),\n", " gr.Image(label=\"Image\"),\n", " # gr.Image(label=\"Image w/ Cropper\", tool=\"select\"),\n", " # gr.Image(label=\"Sketchpad\", source=\"canvas\"),\n", " gr.Image(label=\"Webcam\", sources=[\"webcam\"]),\n", " gr.Video(label=\"Video\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Audio(label=\"Microphone\", sources=[\"microphone\"]),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\", headers=[\"Name\", \"Age\", \"Gender\"]),\n", " ],\n", " outputs=[\n", " gr.Textbox(label=\"Textbox\"),\n", " gr.Label(label=\"Label\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Image(label=\"Image\", elem_id=\"output-img\"),\n", " gr.Video(label=\"Video\"),\n", " gr.HighlightedText(\n", " label=\"HighlightedText\", color_map={\"punc\": \"pink\", \"test 0\": \"blue\"}\n", " ),\n", " gr.HighlightedText(label=\"HighlightedText\", show_legend=True),\n", " gr.JSON(label=\"JSON\"),\n", " gr.HTML(label=\"HTML\"),\n", " 
gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\"),\n", " gr.Dataframe(label=\"Numpy\"),\n", " ],\n", " examples=[\n", " [\n", " \"the quick brown fox\",\n", " \"jumps over the lazy dog\",\n", " 10,\n", " 12,\n", " 4,\n", " True,\n", " [\"foo\", \"baz\"],\n", " \"baz\",\n", " \"bar\",\n", " [\"foo\", \"bar\"],\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " # os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " # os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/world.mp4\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]],\n", " ]\n", " ]\n", " * 3,\n", " title=\"Kitchen Sink\",\n", " description=\"Try out all the components!\",\n", " article=\"Learn more about [Gradio](http://gradio.app)\",\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/kitchen_sink/run.py b/demo/kitchen_sink/run.py index 9e959528d6230..2ea4833c1436b 100755 --- a/demo/kitchen_sink/run.py +++ b/demo/kitchen_sink/run.py @@ -113,7 +113,7 @@ def fn( gr.Textbox(label="Textbox"), gr.Label(label="Label"), gr.Audio(label="Audio"), - gr.Image(label="Image"), + gr.Image(label="Image", elem_id="output-img"), gr.Video(label="Video"), gr.HighlightedText( label="HighlightedText", color_map={"punc": "pink", "test 0": "blue"} diff --git a/demo/kitchen_sink_random/requirements.txt b/demo/kitchen_sink_random/requirements.txt new file mode 100644 index 0000000000000..babdd14a51a41 --- /dev/null +++ b/demo/kitchen_sink_random/requirements.txt @@ -0,0 +1,2 @@ +matplotlib +pandas diff --git 
a/demo/leaderboard/DESCRIPTION.md b/demo/leaderboard/DESCRIPTION.md deleted file mode 100644 index 39267b584fec0..0000000000000 --- a/demo/leaderboard/DESCRIPTION.md +++ /dev/null @@ -1 +0,0 @@ -A simple dashboard ranking spaces by number of likes. \ No newline at end of file diff --git a/demo/leaderboard/run.ipynb b/demo/leaderboard/run.ipynb deleted file mode 100644 index b917e7f91ef4a..0000000000000 --- a/demo/leaderboard/run.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: leaderboard\n", "### A simple dashboard ranking spaces by number of likes.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import requests\n", "import pandas as pd\n", "from huggingface_hub.hf_api import SpaceInfo\n", "path = \"https://huggingface.co/api/spaces\"\n", "\n", "def get_blocks_party_spaces():\n", " r = requests.get(path)\n", " d = r.json()\n", " spaces = [SpaceInfo(**x) for x in d]\n", " blocks_spaces = {}\n", " for i in range(0,len(spaces)):\n", " if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README':\n", " blocks_spaces[spaces[i].id]=spaces[i].likes\n", " df = pd.DataFrame(\n", " [{\"Spaces_Name\": Spaces, \"likes\": likes} for Spaces,likes in blocks_spaces.items()])\n", " df = df.sort_values(by=['likes'],ascending=False)\n", " return df\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " gr.Markdown(\"\"\"Leaderboard for the most popular Blocks Event Spaces. 
To learn more and join, see Blocks Party Event\"\"\")\n", " with gr.Tabs():\n", " with gr.TabItem(\"Blocks Party Leaderboard\"):\n", " with gr.Row():\n", " data = gr.Dataframe(type=\"pandas\")\n", " with gr.Row():\n", " data_run = gr.Button(\"Refresh\")\n", " data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)\n", " # running the function on page load in addition to when the button is clicked\n", " block.load(get_blocks_party_spaces, inputs=None, outputs=data)\n", "\n", "block.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/leaderboard/run.py b/demo/leaderboard/run.py deleted file mode 100644 index d561afb3245e7..0000000000000 --- a/demo/leaderboard/run.py +++ /dev/null @@ -1,35 +0,0 @@ -import gradio as gr -import requests -import pandas as pd -from huggingface_hub.hf_api import SpaceInfo -path = "https://huggingface.co/api/spaces" - -def get_blocks_party_spaces(): - r = requests.get(path) - d = r.json() - spaces = [SpaceInfo(**x) for x in d] - blocks_spaces = {} - for i in range(0,len(spaces)): - if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README': - blocks_spaces[spaces[i].id]=spaces[i].likes - df = pd.DataFrame( - [{"Spaces_Name": Spaces, "likes": likes} for Spaces,likes in blocks_spaces.items()]) - df = df.sort_values(by=['likes'],ascending=False) - return df - -block = gr.Blocks() - -with block: - gr.Markdown("""Leaderboard for the most popular Blocks Event Spaces. 
To learn more and join, see Blocks Party Event""") - with gr.Tabs(): - with gr.TabItem("Blocks Party Leaderboard"): - with gr.Row(): - data = gr.Dataframe(type="pandas") - with gr.Row(): - data_run = gr.Button("Refresh") - data_run.click(get_blocks_party_spaces, inputs=None, outputs=data) - # running the function on page load in addition to when the button is clicked - block.load(get_blocks_party_spaces, inputs=None, outputs=data) - -block.launch() - diff --git a/demo/line_plot_demo/requirements.txt b/demo/line_plot_demo/requirements.txt new file mode 100644 index 0000000000000..fb6c7ed7ec60d --- /dev/null +++ b/demo/line_plot_demo/requirements.txt @@ -0,0 +1 @@ +pandas diff --git a/demo/line_plot_demo/run.ipynb b/demo/line_plot_demo/run.ipynb index e368d4b3f65be..0c2ab3dc5c9b4 100644 --- a/demo/line_plot_demo/run.ipynb +++ b/demo/line_plot_demo/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: line_plot_demo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "from random import randint, random\n", "import gradio as gr\n", "\n", "\n", "temp_sensor_data = pd.DataFrame(\n", " {\n", " \"time\": pd.date_range(\"2021-01-01\", end=\"2021-01-05\", periods=200),\n", " \"temperature\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"humidity\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"location\": [\"indoor\", \"outdoor\"] * 100,\n", " }\n", ")\n", "\n", "food_rating_data = pd.DataFrame(\n", " {\n", " \"cuisine\": [[\"Italian\", \"Mexican\", \"Chinese\"][i % 3] for i in range(100)],\n", " \"rating\": [random() * 4 + 0.5 * (i % 3) for 
i in range(100)],\n", " \"price\": [randint(10, 50) + 4 * (i % 3) for i in range(100)],\n", " \"wait\": [random() for i in range(100)],\n", " }\n", ")\n", "\n", "with gr.Blocks() as line_plots:\n", " with gr.Row():\n", " start = gr.DateTime(\"2021-01-01 00:00:00\", label=\"Start\")\n", " end = gr.DateTime(\"2021-01-05 00:00:00\", label=\"End\")\n", " apply_btn = gr.Button(\"Apply\", scale=0)\n", " with gr.Row():\n", " group_by = gr.Radio([\"None\", \"30m\", \"1h\", \"4h\", \"1d\"], value=\"None\", label=\"Group by\")\n", " aggregate = gr.Radio([\"sum\", \"mean\", \"median\", \"min\", \"max\"], value=\"sum\", label=\"Aggregation\")\n", "\n", " temp_by_time = gr.LinePlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " )\n", " temp_by_time_location = gr.LinePlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " color=\"location\",\n", " )\n", "\n", " time_graphs = [temp_by_time, temp_by_time_location]\n", " group_by.change(\n", " lambda group: [gr.LinePlot(x_bin=None if group == \"None\" else group)] * len(time_graphs),\n", " group_by,\n", " time_graphs\n", " )\n", " aggregate.change(\n", " lambda aggregate: [gr.LinePlot(y_aggregate=aggregate)] * len(time_graphs),\n", " aggregate,\n", " time_graphs\n", " )\n", "\n", " def rescale(select: gr.SelectData):\n", " return select.index\n", " rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])\n", "\n", " for trigger in [apply_btn.click, rescale_evt.then]:\n", " trigger(\n", " lambda start, end: [gr.LinePlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs\n", " )\n", "\n", " price_by_cuisine = gr.LinePlot(\n", " food_rating_data,\n", " x=\"cuisine\",\n", " y=\"price\",\n", " )\n", " with gr.Row():\n", " price_by_rating = gr.LinePlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " )\n", " price_by_rating_color = gr.LinePlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " 
color=\"cuisine\",\n", " color_map={\"Italian\": \"red\", \"Mexican\": \"green\", \"Chinese\": \"blue\"},\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " line_plots.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: line_plot_demo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "from random import randint, random\n", "import gradio as gr\n", "\n", "\n", "temp_sensor_data = pd.DataFrame(\n", " {\n", " \"time\": pd.date_range(\"2021-01-01\", end=\"2021-01-05\", periods=200),\n", " \"temperature\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"humidity\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"location\": [\"indoor\", \"outdoor\"] * 100,\n", " }\n", ")\n", "\n", "food_rating_data = pd.DataFrame(\n", " {\n", " \"cuisine\": [[\"Italian\", \"Mexican\", \"Chinese\"][i % 3] for i in range(100)],\n", " \"rating\": [random() * 4 + 0.5 * (i % 3) for i in range(100)],\n", " \"price\": [randint(10, 50) + 4 * (i % 3) for i in range(100)],\n", " \"wait\": [random() for i in range(100)],\n", " }\n", ")\n", "\n", "with gr.Blocks() as line_plots:\n", " with gr.Row():\n", " start = gr.DateTime(\"2021-01-01 00:00:00\", label=\"Start\")\n", " end = gr.DateTime(\"2021-01-05 00:00:00\", label=\"End\")\n", " apply_btn = gr.Button(\"Apply\", scale=0)\n", " with gr.Row():\n", " group_by = gr.Radio([\"None\", \"30m\", \"1h\", \"4h\", \"1d\"], value=\"None\", label=\"Group by\")\n", " aggregate = gr.Radio([\"sum\", \"mean\", \"median\", \"min\", \"max\"], 
value=\"sum\", label=\"Aggregation\")\n", "\n", " temp_by_time = gr.LinePlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " )\n", " temp_by_time_location = gr.LinePlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " color=\"location\",\n", " )\n", "\n", " time_graphs = [temp_by_time, temp_by_time_location]\n", " group_by.change(\n", " lambda group: [gr.LinePlot(x_bin=None if group == \"None\" else group)] * len(time_graphs),\n", " group_by,\n", " time_graphs\n", " )\n", " aggregate.change(\n", " lambda aggregate: [gr.LinePlot(y_aggregate=aggregate)] * len(time_graphs),\n", " aggregate,\n", " time_graphs\n", " )\n", "\n", " def rescale(select: gr.SelectData):\n", " return select.index\n", " rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])\n", "\n", " for trigger in [apply_btn.click, rescale_evt.then]:\n", " trigger(\n", " lambda start, end: [gr.LinePlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs\n", " )\n", "\n", " price_by_cuisine = gr.LinePlot(\n", " food_rating_data,\n", " x=\"cuisine\",\n", " y=\"price\",\n", " )\n", " with gr.Row():\n", " price_by_rating = gr.LinePlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " )\n", " price_by_rating_color = gr.LinePlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " color=\"cuisine\",\n", " color_map={\"Italian\": \"red\", \"Mexican\": \"green\", \"Chinese\": \"blue\"},\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " line_plots.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/live_dashboard/requirements.txt b/demo/live_dashboard/requirements.txt index d42d0ad03bdf8..38c55157207e1 100644 --- a/demo/live_dashboard/requirements.txt +++ b/demo/live_dashboard/requirements.txt @@ -1 +1,3 @@ -plotly \ No newline at end of file +numpy +pandas +plotly diff --git a/demo/live_dashboard/run.ipynb 
b/demo/live_dashboard/run.ipynb index 395112059d154..faa2d952c7eea 100644 --- a/demo/live_dashboard/run.ipynb +++ b/demo/live_dashboard/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: live_dashboard\n", "### This demo shows how you can build a live interactive dashboard with gradio.\n", "The current time is refreshed every second and the plot every half second by using the 'every' keyword in the event handler.\n", "Changing the value of the slider will control the period of the sine curve (the distance between peaks). \n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import math\n", "\n", "import pandas as pd\n", "\n", "import gradio as gr\n", "import datetime\n", "import numpy as np\n", "\n", "def get_time():\n", " return datetime.datetime.now()\n", "\n", "plot_end = 2 * math.pi\n", "\n", "def get_plot(period=1):\n", " global plot_end\n", " x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n", " y = np.sin(2 * math.pi * period * x)\n", " update = gr.LinePlot(\n", " value=pd.DataFrame({\"x\": x, \"y\": y}),\n", " x=\"x\",\n", " y=\"y\",\n", " title=\"Plot (updates every second)\",\n", " width=600,\n", " height=350,\n", " )\n", " plot_end += 2 * math.pi\n", " if plot_end > 1000:\n", " plot_end = 2 * math.pi\n", " return update\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " c_time2 = gr.Textbox(label=\"Current Time refreshed every second\")\n", " gr.Textbox(\n", " \"Change the value of the slider to automatically update the plot\",\n", " label=\"\",\n", " )\n", " period = gr.Slider(\n", " label=\"Period of plot\", value=1, minimum=0, maximum=10, 
step=1\n", " )\n", " plot = gr.LinePlot(show_label=False)\n", " with gr.Column():\n", " name = gr.Textbox(label=\"Enter your name\")\n", " greeting = gr.Textbox(label=\"Greeting\")\n", " button = gr.Button(value=\"Greet\")\n", " button.click(lambda s: f\"Hello {s}\", name, greeting)\n", "\n", " demo.load(lambda: datetime.datetime.now(), None, c_time2, every=1)\n", " dep = demo.load(get_plot, None, plot, every=1)\n", " period.change(get_plot, period, plot, every=1, cancels=[dep])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: live_dashboard\n", "### This demo shows how you can build a live interactive dashboard with gradio.\n", "The current time is refreshed every second and the plot every half second by using the 'every' keyword in the event handler.\n", "Changing the value of the slider will control the period of the sine curve (the distance between peaks). 
\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy pandas plotly "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import math\n", "\n", "import pandas as pd\n", "\n", "import gradio as gr\n", "import datetime\n", "import numpy as np\n", "\n", "def get_time():\n", " return datetime.datetime.now()\n", "\n", "plot_end = 2 * math.pi\n", "\n", "def get_plot(period=1):\n", " global plot_end\n", " x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n", " y = np.sin(2 * math.pi * period * x)\n", " update = gr.LinePlot(\n", " value=pd.DataFrame({\"x\": x, \"y\": y}),\n", " x=\"x\",\n", " y=\"y\",\n", " title=\"Plot (updates every second)\",\n", " width=600,\n", " height=350,\n", " )\n", " plot_end += 2 * math.pi\n", " if plot_end > 1000:\n", " plot_end = 2 * math.pi\n", " return update\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " c_time2 = gr.Textbox(label=\"Current Time refreshed every second\")\n", " gr.Textbox(\n", " \"Change the value of the slider to automatically update the plot\",\n", " label=\"\",\n", " )\n", " period = gr.Slider(\n", " label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1\n", " )\n", " plot = gr.LinePlot(show_label=False)\n", " with gr.Column():\n", " name = gr.Textbox(label=\"Enter your name\")\n", " greeting = gr.Textbox(label=\"Greeting\")\n", " button = gr.Button(value=\"Greet\")\n", " button.click(lambda s: f\"Hello {s}\", name, greeting)\n", "\n", " demo.load(lambda: datetime.datetime.now(), None, c_time2, every=1)\n", " dep = demo.load(get_plot, None, plot, every=1)\n", " period.change(get_plot, period, plot, every=1, cancels=[dep])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at 
end of file diff --git a/demo/login_with_huggingface/requirements.txt b/demo/login_with_huggingface/requirements.txt new file mode 100644 index 0000000000000..6b964ccca3c1b --- /dev/null +++ b/demo/login_with_huggingface/requirements.txt @@ -0,0 +1 @@ +huggingface_hub diff --git a/demo/login_with_huggingface/run.ipynb b/demo/login_with_huggingface/run.ipynb index dbb9058f2a9d5..3b08a319a43da 100644 --- a/demo/login_with_huggingface/run.ipynb +++ b/demo/login_with_huggingface/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: login_with_huggingface"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "\n", "import gradio as gr\n", "from huggingface_hub import whoami\n", "\n", "def hello(profile: gr.OAuthProfile | None) -> str:\n", " if profile is None:\n", " return \"I don't know you.\"\n", " return f\"Hello {profile.name}\"\n", "\n", "def list_organizations(oauth_token: gr.OAuthToken | None) -> str:\n", " if oauth_token is None:\n", " return \"Please deploy this on Spaces and log in to list organizations.\"\n", " org_names = [org[\"name\"] for org in whoami(oauth_token.token)[\"orgs\"]]\n", " return f\"You belong to {', '.join(org_names)}.\"\n", "\n", "with gr.Blocks() as demo:\n", " gr.LoginButton()\n", " m1 = gr.Markdown()\n", " m2 = gr.Markdown()\n", " demo.load(hello, inputs=None, outputs=m1)\n", " demo.load(list_organizations, inputs=None, outputs=m2)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": 
"302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: login_with_huggingface"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio huggingface_hub "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "\n", "import gradio as gr\n", "from huggingface_hub import whoami\n", "\n", "def hello(profile: gr.OAuthProfile | None) -> str:\n", " if profile is None:\n", " return \"I don't know you.\"\n", " return f\"Hello {profile.name}\"\n", "\n", "def list_organizations(oauth_token: gr.OAuthToken | None) -> str:\n", " if oauth_token is None:\n", " return \"Please deploy this on Spaces and log in to list organizations.\"\n", " org_names = [org[\"name\"] for org in whoami(oauth_token.token)[\"orgs\"]]\n", " return f\"You belong to {', '.join(org_names)}.\"\n", "\n", "with gr.Blocks() as demo:\n", " gr.LoginButton()\n", " m1 = gr.Markdown()\n", " m2 = gr.Markdown()\n", " demo.load(hello, inputs=None, outputs=m1)\n", " demo.load(list_organizations, inputs=None, outputs=m2)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/logoutbutton_component/requirements.txt b/demo/logoutbutton_component/requirements.txt deleted file mode 100644 index f7359a07d4b7d..0000000000000 --- a/demo/logoutbutton_component/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -gradio[oauth] \ No newline at end of file diff --git a/demo/logoutbutton_component/run.ipynb b/demo/logoutbutton_component/run.ipynb deleted file mode 100644 index 6f852cf11042f..0000000000000 --- a/demo/logoutbutton_component/run.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", 
"metadata": {}, "source": ["# Gradio Demo: logoutbutton_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio gradio[oauth]"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " gr.LogoutButton()\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/logoutbutton_component/run.py b/demo/logoutbutton_component/run.py deleted file mode 100644 index 6a647554975f9..0000000000000 --- a/demo/logoutbutton_component/run.py +++ /dev/null @@ -1,6 +0,0 @@ -import gradio as gr - -with gr.Blocks() as demo: - gr.LogoutButton() - -demo.launch() diff --git a/demo/magic_8_ball/requirements.txt b/demo/magic_8_ball/requirements.txt new file mode 100644 index 0000000000000..e5ff487e42f85 --- /dev/null +++ b/demo/magic_8_ball/requirements.txt @@ -0,0 +1,7 @@ +git+https://github.com/huggingface/parler-tts.git +accelerate +spaces +torch +pydub +transformers +huggingface_hub diff --git a/demo/magic_8_ball/run.ipynb b/demo/magic_8_ball/run.ipynb new file mode 100644 index 0000000000000..c7e9c261a01d6 --- /dev/null +++ b/demo/magic_8_ball/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: magic_8_ball"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/parler-tts.git accelerate spaces torch pydub transformers huggingface_hub "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo 
repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/magic_8_ball/streamer.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import io\n", "from threading import Thread\n", "import random\n", "import os\n", "\n", "import numpy as np\n", "import spaces\n", "import gradio as gr\n", "import torch\n", "\n", "from parler_tts import ParlerTTSForConditionalGeneration\n", "from pydub import AudioSegment\n", "from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed\n", "from huggingface_hub import InferenceClient\n", "from streamer import ParlerTTSStreamer\n", "import time\n", "\n", "\n", "device = (\n", " \"cuda:0\"\n", " if torch.cuda.is_available()\n", " else \"mps\"\n", " if torch.backends.mps.is_available()\n", " else \"cpu\"\n", ")\n", "torch_dtype = torch.float16 if device != \"cpu\" else torch.float32\n", "\n", "repo_id = \"parler-tts/parler_tts_mini_v0.1\"\n", "\n", "jenny_repo_id = \"ylacombe/parler-tts-mini-jenny-30H\"\n", "\n", "model = ParlerTTSForConditionalGeneration.from_pretrained(\n", " jenny_repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True\n", ").to(device)\n", "\n", "client = InferenceClient(token=os.getenv(\"HF_TOKEN\"))\n", "\n", "tokenizer = AutoTokenizer.from_pretrained(repo_id)\n", "feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)\n", "\n", "SAMPLE_RATE = feature_extractor.sampling_rate\n", "SEED = 42\n", "\n", "\n", "def numpy_to_mp3(audio_array, sampling_rate):\n", " # Normalize audio_array if it's floating-point\n", " if np.issubdtype(audio_array.dtype, np.floating):\n", " max_val = np.max(np.abs(audio_array))\n", " audio_array = (audio_array / max_val) * 32767 # Normalize to 16-bit range\n", " audio_array = audio_array.astype(np.int16)\n", "\n", " # Create an audio segment from the numpy array\n", " audio_segment = AudioSegment(\n", " audio_array.tobytes(),\n", " 
frame_rate=sampling_rate,\n", " sample_width=audio_array.dtype.itemsize,\n", " channels=1,\n", " )\n", "\n", " # Export the audio segment to MP3 bytes - use a high bitrate to maximise quality\n", " mp3_io = io.BytesIO()\n", " audio_segment.export(mp3_io, format=\"mp3\", bitrate=\"320k\")\n", "\n", " # Get the MP3 bytes\n", " mp3_bytes = mp3_io.getvalue()\n", " mp3_io.close()\n", "\n", " return mp3_bytes\n", "\n", "\n", "sampling_rate = model.audio_encoder.config.sampling_rate\n", "frame_rate = model.audio_encoder.config.frame_rate\n", "\n", "\n", "def generate_response(audio):\n", " gr.Info(\"Transcribing Audio\", duration=5)\n", " question = client.automatic_speech_recognition(audio).text # type: ignore\n", " messages = [\n", " {\n", " \"role\": \"system\",\n", " \"content\": (\n", " \"You are a magic 8 ball.\"\n", " \"Someone will present to you a situation or question and your job \"\n", " \"is to answer with a cryptic addage or proverb such as \"\n", " \"'curiosity killed the cat' or 'The early bird gets the worm'.\"\n", " \"Keep your answers short and do not include the phrase 'Magic 8 Ball' in your response. 
If the question does not make sense or is off-topic, say 'Foolish questions get foolish answers.'\"\n", " \"For example, 'Magic 8 Ball, should I get a dog?', 'A dog is ready for you but are you ready for the dog?'\"\n", " ),\n", " },\n", " {\n", " \"role\": \"user\",\n", " \"content\": f\"Magic 8 Ball please answer this question - {question}\",\n", " },\n", " ]\n", "\n", " response = client.chat_completion( # type: ignore\n", " messages,\n", " max_tokens=64,\n", " seed=random.randint(1, 5000),\n", " model=\"mistralai/Mistral-7B-Instruct-v0.3\",\n", " )\n", " response = response.choices[0].message.content.replace(\"Magic 8 Ball\", \"\") # type: ignore\n", " return response, None, None\n", "\n", "\n", "@spaces.GPU\n", "def read_response(answer):\n", " play_steps_in_s = 2.0\n", " play_steps = int(frame_rate * play_steps_in_s)\n", "\n", " description = \"Jenny speaks at an average pace with a calm delivery in a very confined sounding environment with clear audio quality.\"\n", " description_tokens = tokenizer(description, return_tensors=\"pt\").to(device)\n", "\n", " streamer = ParlerTTSStreamer(model, device=device, play_steps=play_steps)\n", " prompt = tokenizer(answer, return_tensors=\"pt\").to(device)\n", "\n", " generation_kwargs = dict( # noqa: C408\n", " input_ids=description_tokens.input_ids,\n", " prompt_input_ids=prompt.input_ids,\n", " streamer=streamer,\n", " do_sample=True,\n", " temperature=1.0,\n", " min_new_tokens=10,\n", " )\n", "\n", " set_seed(SEED)\n", " thread = Thread(target=model.generate, kwargs=generation_kwargs)\n", " thread.start()\n", " start = time.time()\n", " for new_audio in streamer:\n", " print(\n", " f\"Sample of length: {round(new_audio.shape[0] / sampling_rate, 2)} seconds after {time.time() - start} seconds\"\n", " )\n", " yield answer, numpy_to_mp3(new_audio, sampling_rate=sampling_rate)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.HTML(\n", " \"\"\"\n", "

Magic 8 Ball \ud83c\udfb1

\n", "

Ask a question and receive wisdom

\n", "

Powered by Parler-TTS\n", " \"\"\"\n", " )\n", " with gr.Group():\n", " with gr.Row():\n", " audio_out = gr.Audio(\n", " label=\"Spoken Answer\", streaming=True, autoplay=True, loop=False\n", " )\n", " answer = gr.Textbox(label=\"Answer\")\n", " state = gr.State()\n", " with gr.Row():\n", " gr.Markdown(\n", " \"Example questions: 'Should I get a dog?', 'What is the meaning of life?'\"\n", " )\n", " audio_in = gr.Audio(\n", " label=\"Speak you question\", sources=\"microphone\", type=\"filepath\"\n", " )\n", " with gr.Row():\n", " gr.HTML(\n", " \"\"\"

Examples: 'What is the meaning of life?', 'Should I get a dog?'

\"\"\"\n", " )\n", " audio_in.stop_recording(\n", " generate_response, audio_in, [state, answer, audio_out]\n", " ).then(fn=read_response, inputs=state, outputs=[answer, audio_out])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/magic_8_ball/run.py b/demo/magic_8_ball/run.py new file mode 100644 index 0000000000000..a9763c3229b7c --- /dev/null +++ b/demo/magic_8_ball/run.py @@ -0,0 +1,169 @@ +import io +from threading import Thread +import random +import os + +import numpy as np +import spaces +import gradio as gr +import torch + +from parler_tts import ParlerTTSForConditionalGeneration +from pydub import AudioSegment +from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed +from huggingface_hub import InferenceClient +from streamer import ParlerTTSStreamer +import time + + +device = ( + "cuda:0" + if torch.cuda.is_available() + else "mps" + if torch.backends.mps.is_available() + else "cpu" +) +torch_dtype = torch.float16 if device != "cpu" else torch.float32 + +repo_id = "parler-tts/parler_tts_mini_v0.1" + +jenny_repo_id = "ylacombe/parler-tts-mini-jenny-30H" + +model = ParlerTTSForConditionalGeneration.from_pretrained( + jenny_repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True +).to(device) + +client = InferenceClient(token=os.getenv("HF_TOKEN")) + +tokenizer = AutoTokenizer.from_pretrained(repo_id) +feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id) + +SAMPLE_RATE = feature_extractor.sampling_rate +SEED = 42 + + +def numpy_to_mp3(audio_array, sampling_rate): + # Normalize audio_array if it's floating-point + if np.issubdtype(audio_array.dtype, np.floating): + max_val = np.max(np.abs(audio_array)) + audio_array = (audio_array / max_val) * 32767 # Normalize to 16-bit range + audio_array = audio_array.astype(np.int16) + + # Create an audio segment from the numpy array + audio_segment = AudioSegment( + 
audio_array.tobytes(), + frame_rate=sampling_rate, + sample_width=audio_array.dtype.itemsize, + channels=1, + ) + + # Export the audio segment to MP3 bytes - use a high bitrate to maximise quality + mp3_io = io.BytesIO() + audio_segment.export(mp3_io, format="mp3", bitrate="320k") + + # Get the MP3 bytes + mp3_bytes = mp3_io.getvalue() + mp3_io.close() + + return mp3_bytes + + +sampling_rate = model.audio_encoder.config.sampling_rate +frame_rate = model.audio_encoder.config.frame_rate + + +def generate_response(audio): + gr.Info("Transcribing Audio", duration=5) + question = client.automatic_speech_recognition(audio).text # type: ignore + messages = [ + { + "role": "system", + "content": ( + "You are a magic 8 ball." + "Someone will present to you a situation or question and your job " + "is to answer with a cryptic addage or proverb such as " + "'curiosity killed the cat' or 'The early bird gets the worm'." + "Keep your answers short and do not include the phrase 'Magic 8 Ball' in your response. If the question does not make sense or is off-topic, say 'Foolish questions get foolish answers.'" + "For example, 'Magic 8 Ball, should I get a dog?', 'A dog is ready for you but are you ready for the dog?'" + ), + }, + { + "role": "user", + "content": f"Magic 8 Ball please answer this question - {question}", + }, + ] + + response = client.chat_completion( # type: ignore + messages, + max_tokens=64, + seed=random.randint(1, 5000), + model="mistralai/Mistral-7B-Instruct-v0.3", + ) + response = response.choices[0].message.content.replace("Magic 8 Ball", "") # type: ignore + return response, None, None + + +@spaces.GPU +def read_response(answer): + play_steps_in_s = 2.0 + play_steps = int(frame_rate * play_steps_in_s) + + description = "Jenny speaks at an average pace with a calm delivery in a very confined sounding environment with clear audio quality." 
+ description_tokens = tokenizer(description, return_tensors="pt").to(device) + + streamer = ParlerTTSStreamer(model, device=device, play_steps=play_steps) + prompt = tokenizer(answer, return_tensors="pt").to(device) + + generation_kwargs = dict( # noqa: C408 + input_ids=description_tokens.input_ids, + prompt_input_ids=prompt.input_ids, + streamer=streamer, + do_sample=True, + temperature=1.0, + min_new_tokens=10, + ) + + set_seed(SEED) + thread = Thread(target=model.generate, kwargs=generation_kwargs) + thread.start() + start = time.time() + for new_audio in streamer: + print( + f"Sample of length: {round(new_audio.shape[0] / sampling_rate, 2)} seconds after {time.time() - start} seconds" + ) + yield answer, numpy_to_mp3(new_audio, sampling_rate=sampling_rate) + + +with gr.Blocks() as demo: + gr.HTML( + """ +

Magic 8 Ball 🎱

+

Ask a question and receive wisdom

+

Powered by Parler-TTS + """ + ) + with gr.Group(): + with gr.Row(): + audio_out = gr.Audio( + label="Spoken Answer", streaming=True, autoplay=True, loop=False + ) + answer = gr.Textbox(label="Answer") + state = gr.State() + with gr.Row(): + gr.Markdown( + "Example questions: 'Should I get a dog?', 'What is the meaning of life?'" + ) + audio_in = gr.Audio( + label="Speak you question", sources="microphone", type="filepath" + ) + with gr.Row(): + gr.HTML( + """

Examples: 'What is the meaning of life?', 'Should I get a dog?'

""" + ) + audio_in.stop_recording( + generate_response, audio_in, [state, answer, audio_out] + ).then(fn=read_response, inputs=state, outputs=[answer, audio_out]) + + +if __name__ == "__main__": + demo.launch() diff --git a/demo/magic_8_ball/streamer.py b/demo/magic_8_ball/streamer.py new file mode 100644 index 0000000000000..289b05c06ed64 --- /dev/null +++ b/demo/magic_8_ball/streamer.py @@ -0,0 +1,146 @@ +from queue import Queue +from transformers.generation.streamers import BaseStreamer +from typing import Optional +from parler_tts import ParlerTTSForConditionalGeneration +import numpy as np +import math +import torch + + +class ParlerTTSStreamer(BaseStreamer): + def __init__( + self, + model: ParlerTTSForConditionalGeneration, + device: Optional[str] = None, + play_steps: Optional[int] = 10, + stride: Optional[int] = None, + timeout: Optional[float] = None, + ): + """ + Streamer that stores playback-ready audio in a queue, to be used by a downstream application as an iterator. This is + useful for applications that benefit from accessing the generated audio in a non-blocking way (e.g. in an interactive + Gradio demo). + Parameters: + model (`ParlerTTSForConditionalGeneration`): + The Parler-TTS model used to generate the audio waveform. + device (`str`, *optional*): + The torch device on which to run the computation. If `None`, will default to the device of the model. + play_steps (`int`, *optional*, defaults to 10): + The number of generation steps with which to return the generated audio array. Using fewer steps will + mean the first chunk is ready faster, but will require more codec decoding steps overall. This value + should be tuned to your device and latency requirements. + stride (`int`, *optional*): + The window (stride) between adjacent audio samples. Using a stride between adjacent audio samples reduces + the hard boundary between them, giving smoother playback. If `None`, will default to a value equivalent to + play_steps // 6 in the audio space. 
+ timeout (`int`, *optional*): + The timeout for the audio queue. If `None`, the queue will block indefinitely. Useful to handle exceptions + in `.generate()`, when it is called in a separate thread. + """ + self.decoder = model.decoder + self.audio_encoder = model.audio_encoder + self.generation_config = model.generation_config + self.device = device if device is not None else model.device + + # variables used in the streaming process + self.play_steps = play_steps + if stride is not None: + self.stride = stride + else: + hop_length = math.floor( + self.audio_encoder.config.sampling_rate + / self.audio_encoder.config.frame_rate + ) + self.stride = hop_length * (play_steps - self.decoder.num_codebooks) // 6 + self.token_cache = None + self.to_yield = 0 + + # varibles used in the thread process + self.audio_queue = Queue() + self.stop_signal = None + self.timeout = timeout + + def apply_delay_pattern_mask(self, input_ids): + # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Parler) + _, delay_pattern_mask = self.decoder.build_delay_pattern_mask( + input_ids[:, :1], + bos_token_id=self.generation_config.bos_token_id, + pad_token_id=self.generation_config.decoder_start_token_id, + max_length=input_ids.shape[-1], + ) + # apply the pattern mask to the input ids + input_ids = self.decoder.apply_delay_pattern_mask(input_ids, delay_pattern_mask) + + # revert the pattern delay mask by filtering the pad token id + mask = (delay_pattern_mask != self.generation_config.bos_token_id) & ( + delay_pattern_mask != self.generation_config.pad_token_id + ) + input_ids = input_ids[mask].reshape(1, self.decoder.num_codebooks, -1) + # append the frame dimension back to the audio codes + input_ids = input_ids[None, ...] 
+ + # send the input_ids to the correct device + input_ids = input_ids.to(self.audio_encoder.device) + + decode_sequentially = ( + self.generation_config.bos_token_id in input_ids + or self.generation_config.pad_token_id in input_ids + or self.generation_config.eos_token_id in input_ids + ) + if not decode_sequentially: + output_values = self.audio_encoder.decode( + input_ids, + audio_scales=[None], + ) + else: + sample = input_ids[:, 0] + sample_mask = (sample >= self.audio_encoder.config.codebook_size).sum( + dim=(0, 1) + ) == 0 + sample = sample[:, :, sample_mask] + output_values = self.audio_encoder.decode(sample[None, ...], [None]) + + audio_values = output_values.audio_values[0, 0] + return audio_values.cpu().float().numpy() + + def put(self, value): + batch_size = value.shape[0] // self.decoder.num_codebooks + if batch_size > 1: + raise ValueError("ParlerTTSStreamer only supports batch size 1") + + if self.token_cache is None: + self.token_cache = value + else: + self.token_cache = torch.concatenate( + [self.token_cache, value[:, None]], dim=-1 + ) + + if self.token_cache.shape[-1] % self.play_steps == 0: # type: ignore + audio_values = self.apply_delay_pattern_mask(self.token_cache) + self.on_finalized_audio(audio_values[self.to_yield : -self.stride]) + self.to_yield += len(audio_values) - self.to_yield - self.stride + + def end(self): + """Flushes any remaining cache and appends the stop symbol.""" + if self.token_cache is not None: + audio_values = self.apply_delay_pattern_mask(self.token_cache) + else: + audio_values = np.zeros(self.to_yield) + + self.on_finalized_audio(audio_values[self.to_yield :], stream_end=True) + + def on_finalized_audio(self, audio: np.ndarray, stream_end: bool = False): + """Put the new audio in the queue. 
If the stream is ending, also put a stop signal in the queue.""" + self.audio_queue.put(audio, timeout=self.timeout) + if stream_end: + self.audio_queue.put(self.stop_signal, timeout=self.timeout) + + def __iter__(self): + return self + + def __next__(self): + value = self.audio_queue.get(timeout=self.timeout) + if not isinstance(value, np.ndarray) and value == self.stop_signal: + raise StopIteration() + else: + return value diff --git a/demo/map_airbnb/requirements.txt b/demo/map_airbnb/requirements.txt index d42d0ad03bdf8..582e17fcf7a29 100644 --- a/demo/map_airbnb/requirements.txt +++ b/demo/map_airbnb/requirements.txt @@ -1 +1,2 @@ -plotly \ No newline at end of file +plotly +datasets diff --git a/demo/map_airbnb/run.ipynb b/demo/map_airbnb/run.ipynb index a500326a662e6..fe82b3a818310 100644 --- a/demo/map_airbnb/run.ipynb +++ b/demo/map_airbnb/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: map_airbnb\n", "### Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. 
\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# type: ignore\n", "import gradio as gr\n", "import plotly.graph_objects as go\n", "from datasets import load_dataset\n", "\n", "dataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\n", "df = dataset.to_pandas()\n", "\n", "def filter_map(min_price, max_price, boroughs):\n", "\n", " filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) &\n", " (df['price'] > min_price) & (df['price'] < max_price)]\n", " names = filtered_df[\"name\"].tolist()\n", " prices = filtered_df[\"price\"].tolist()\n", " text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n", " fig = go.Figure(go.Scattermapbox(\n", " customdata=text_list,\n", " lat=filtered_df['latitude'].tolist(),\n", " lon=filtered_df['longitude'].tolist(),\n", " mode='markers',\n", " marker=go.scattermapbox.Marker(\n", " size=6\n", " ),\n", " hoverinfo=\"text\",\n", " hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n", " ))\n", "\n", " fig.update_layout(\n", " mapbox_style=\"open-street-map\",\n", " hovermode='closest',\n", " mapbox=dict(\n", " bearing=0,\n", " center=go.layout.mapbox.Center(\n", " lat=40.67,\n", " lon=-73.90\n", " ),\n", " pitch=0,\n", " zoom=9\n", " ),\n", " )\n", "\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " with gr.Row():\n", " min_price = gr.Number(value=250, label=\"Minimum Price\")\n", " max_price = gr.Number(value=1000, label=\"Maximum Price\")\n", " boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n", " btn = gr.Button(value=\"Update Filter\")\n", " map = gr.Plot()\n", " demo.load(filter_map, [min_price, max_price, boroughs], map)\n", " btn.click(filter_map, [min_price, max_price, boroughs], map)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: map_airbnb\n", "### Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. 
\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly datasets "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# type: ignore\n", "import gradio as gr\n", "import plotly.graph_objects as go\n", "from datasets import load_dataset\n", "\n", "dataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\n", "df = dataset.to_pandas()\n", "\n", "def filter_map(min_price, max_price, boroughs):\n", "\n", " filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) &\n", " (df['price'] > min_price) & (df['price'] < max_price)]\n", " names = filtered_df[\"name\"].tolist()\n", " prices = filtered_df[\"price\"].tolist()\n", " text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n", " fig = go.Figure(go.Scattermapbox(\n", " customdata=text_list,\n", " lat=filtered_df['latitude'].tolist(),\n", " lon=filtered_df['longitude'].tolist(),\n", " mode='markers',\n", " marker=go.scattermapbox.Marker(\n", " size=6\n", " ),\n", " hoverinfo=\"text\",\n", " hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n", " ))\n", "\n", " fig.update_layout(\n", " mapbox_style=\"open-street-map\",\n", " hovermode='closest',\n", " mapbox=dict(\n", " bearing=0,\n", " center=go.layout.mapbox.Center(\n", " lat=40.67,\n", " lon=-73.90\n", " ),\n", " pitch=0,\n", " zoom=9\n", " ),\n", " )\n", "\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " with gr.Row():\n", " min_price = gr.Number(value=250, label=\"Minimum Price\")\n", " max_price = gr.Number(value=1000, label=\"Maximum Price\")\n", " boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n", " btn = gr.Button(value=\"Update Filter\")\n", " map = gr.Plot()\n", " demo.load(filter_map, [min_price, max_price, boroughs], map)\n", " btn.click(filter_map, [min_price, max_price, boroughs], map)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/markdown_component/run.ipynb b/demo/markdown_component/run.ipynb index 7867cad8885ab..82748cea4737a 100644 --- a/demo/markdown_component/run.ipynb +++ b/demo/markdown_component/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: markdown_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(value=\"This _example_ was **written** in [Markdown](https://en.wikipedia.org/wiki/Markdown)\\n\")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No 
newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: markdown_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " value=\"This _example_ was **written** in [Markdown](https://en.wikipedia.org/wiki/Markdown)\\n Markdown\"\n", " )\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/markdown_component/run.py b/demo/markdown_component/run.py index f02bda05bb2b7..71422fbb4a5bb 100644 --- a/demo/markdown_component/run.py +++ b/demo/markdown_component/run.py @@ -1,6 +1,8 @@ import gradio as gr with gr.Blocks() as demo: - gr.Markdown(value="This _example_ was **written** in [Markdown](https://en.wikipedia.org/wiki/Markdown)\n") + gr.Markdown( + value="This _example_ was **written** in [Markdown](https://en.wikipedia.org/wiki/Markdown)\n Markdown" + ) demo.launch() diff --git a/demo/mini_leaderboard/requirements.txt b/demo/mini_leaderboard/requirements.txt new file mode 100644 index 0000000000000..fb6c7ed7ec60d --- /dev/null +++ b/demo/mini_leaderboard/requirements.txt @@ -0,0 +1 @@ +pandas diff --git a/demo/mini_leaderboard/run.ipynb b/demo/mini_leaderboard/run.ipynb index 509ce3966ac94..db8e589ee0769 100644 --- a/demo/mini_leaderboard/run.ipynb +++ b/demo/mini_leaderboard/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: mini_leaderboard"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, 
"outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('assets')\n", "!wget -q -O assets/__init__.py https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/__init__.py\n", "!wget -q -O assets/custom_css.css https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/custom_css.css\n", "!wget -q -O assets/leaderboard_data.json https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/leaderboard_data.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "from pathlib import Path\n", "\n", "abs_path = Path(__file__).parent.absolute()\n", "\n", "df = pd.read_json(str(abs_path / \"assets/leaderboard_data.json\"))\n", "invisible_df = df.copy()\n", "\n", "COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"Type\",\n", " \"Architecture\",\n", " \"Precision\",\n", " \"Merged\",\n", " \"Hub License\",\n", " \"#Params (B)\",\n", " \"Hub \u2764\ufe0f\",\n", " \"Model sha\",\n", " \"model_name_for_query\",\n", "]\n", "ON_LOAD_COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"model_name_for_query\",\n", "]\n", "TYPES = [\n", " \"str\",\n", " \"markdown\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"bool\",\n", " \"str\",\n", " \"number\",\n", " \"number\",\n", 
" \"bool\",\n", " \"str\",\n", " \"bool\",\n", " \"bool\",\n", " \"str\",\n", "]\n", "NUMERIC_INTERVALS = {\n", " \"?\": pd.Interval(-1, 0, closed=\"right\"),\n", " \"~1.5\": pd.Interval(0, 2, closed=\"right\"),\n", " \"~3\": pd.Interval(2, 4, closed=\"right\"),\n", " \"~7\": pd.Interval(4, 9, closed=\"right\"),\n", " \"~13\": pd.Interval(9, 20, closed=\"right\"),\n", " \"~35\": pd.Interval(20, 45, closed=\"right\"),\n", " \"~60\": pd.Interval(45, 70, closed=\"right\"),\n", " \"70+\": pd.Interval(70, 10000, closed=\"right\"),\n", "}\n", "MODEL_TYPE = [str(s) for s in df[\"T\"].unique()]\n", "Precision = [str(s) for s in df[\"Precision\"].unique()]\n", "\n", "# Searching and filtering\n", "def update_table(\n", " hidden_df: pd.DataFrame,\n", " columns: list,\n", " type_query: list,\n", " precision_query: str,\n", " size_query: list,\n", " query: str,\n", "):\n", " filtered_df = filter_models(hidden_df, type_query, size_query, precision_query) # type: ignore\n", " filtered_df = filter_queries(query, filtered_df)\n", " df = select_columns(filtered_df, columns)\n", " return df\n", "\n", "def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:\n", " return df[(df[\"model_name_for_query\"].str.contains(query, case=False))] # type: ignore\n", "\n", "def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:\n", " # We use COLS to maintain sorting\n", " filtered_df = df[[c for c in COLS if c in df.columns and c in columns]]\n", " return filtered_df # type: ignore\n", "\n", "def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:\n", " final_df = []\n", " if query != \"\":\n", " queries = [q.strip() for q in query.split(\";\")]\n", " for _q in queries:\n", " _q = _q.strip()\n", " if _q != \"\":\n", " temp_filtered_df = search_table(filtered_df, _q)\n", " if len(temp_filtered_df) > 0:\n", " final_df.append(temp_filtered_df)\n", " if len(final_df) > 0:\n", " filtered_df = pd.concat(final_df)\n", " filtered_df = filtered_df.drop_duplicates( 
# type: ignore\n", " subset=[\"Model\", \"Precision\", \"Model sha\"]\n", " )\n", "\n", " return filtered_df\n", "\n", "def filter_models(\n", " df: pd.DataFrame,\n", " type_query: list,\n", " size_query: list,\n", " precision_query: list,\n", ") -> pd.DataFrame:\n", " # Show all models\n", " filtered_df = df\n", "\n", " type_emoji = [t[0] for t in type_query]\n", " filtered_df = filtered_df.loc[df[\"T\"].isin(type_emoji)]\n", " filtered_df = filtered_df.loc[df[\"Precision\"].isin(precision_query + [\"None\"])]\n", "\n", " numeric_interval = pd.IntervalIndex(\n", " sorted([NUMERIC_INTERVALS[s] for s in size_query]) # type: ignore\n", " )\n", " params_column = pd.to_numeric(df[\"#Params (B)\"], errors=\"coerce\")\n", " mask = params_column.apply(lambda x: any(numeric_interval.contains(x))) # type: ignore\n", " filtered_df = filtered_df.loc[mask]\n", "\n", " return filtered_df\n", "\n", "demo = gr.Blocks(css=str(abs_path / \"assets/leaderboard_data.json\"))\n", "with demo:\n", " gr.Markdown(\"\"\"Test Space of the LLM Leaderboard\"\"\", elem_classes=\"markdown-text\")\n", "\n", " with gr.Tabs(elem_classes=\"tab-buttons\") as tabs:\n", " with gr.TabItem(\"\ud83c\udfc5 LLM Benchmark\", elem_id=\"llm-benchmark-tab-table\", id=0):\n", " with gr.Row():\n", " with gr.Column():\n", " with gr.Row():\n", " search_bar = gr.Textbox(\n", " placeholder=\" \ud83d\udd0d Search for your model (separate multiple queries with `;`) and press ENTER...\",\n", " show_label=False,\n", " elem_id=\"search-bar\",\n", " )\n", " with gr.Row():\n", " shown_columns = gr.CheckboxGroup(\n", " choices=COLS,\n", " value=ON_LOAD_COLS,\n", " label=\"Select columns to show\",\n", " elem_id=\"column-select\",\n", " interactive=True,\n", " )\n", " with gr.Column(min_width=320):\n", " filter_columns_type = gr.CheckboxGroup(\n", " label=\"Model types\",\n", " choices=MODEL_TYPE,\n", " value=MODEL_TYPE,\n", " interactive=True,\n", " elem_id=\"filter-columns-type\",\n", " )\n", " filter_columns_precision = 
gr.CheckboxGroup(\n", " label=\"Precision\",\n", " choices=Precision,\n", " value=Precision,\n", " interactive=True,\n", " elem_id=\"filter-columns-precision\",\n", " )\n", " filter_columns_size = gr.CheckboxGroup(\n", " label=\"Model sizes (in billions of parameters)\",\n", " choices=list(NUMERIC_INTERVALS.keys()),\n", " value=list(NUMERIC_INTERVALS.keys()),\n", " interactive=True,\n", " elem_id=\"filter-columns-size\",\n", " )\n", "\n", " leaderboard_table = gr.components.Dataframe(\n", " value=df[ON_LOAD_COLS], # type: ignore\n", " headers=ON_LOAD_COLS,\n", " datatype=TYPES,\n", " elem_id=\"leaderboard-table\",\n", " interactive=False,\n", " visible=True,\n", " column_widths=[\"2%\", \"33%\"],\n", " )\n", "\n", " # Dummy leaderboard for handling the case when the user uses backspace key\n", " hidden_leaderboard_table_for_search = gr.components.Dataframe(\n", " value=invisible_df[COLS], # type: ignore\n", " headers=COLS,\n", " datatype=TYPES,\n", " visible=False,\n", " )\n", " search_bar.submit(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " )\n", " for selector in [\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " ]:\n", " selector.change(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " queue=True,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(default_concurrency_limit=40).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: 
mini_leaderboard"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('assets')\n", "!wget -q -O assets/__init__.py https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/__init__.py\n", "!wget -q -O assets/custom_css.css https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/custom_css.css\n", "!wget -q -O assets/leaderboard_data.json https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/leaderboard_data.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "from pathlib import Path\n", "\n", "abs_path = Path(__file__).parent.absolute()\n", "\n", "df = pd.read_json(str(abs_path / \"assets/leaderboard_data.json\"))\n", "invisible_df = df.copy()\n", "\n", "COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"Type\",\n", " \"Architecture\",\n", " \"Precision\",\n", " \"Merged\",\n", " \"Hub License\",\n", " \"#Params (B)\",\n", " \"Hub \u2764\ufe0f\",\n", " \"Model sha\",\n", " \"model_name_for_query\",\n", "]\n", "ON_LOAD_COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"model_name_for_query\",\n", "]\n", "TYPES = [\n", " \"str\",\n", " \"markdown\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " 
\"number\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"bool\",\n", " \"str\",\n", " \"number\",\n", " \"number\",\n", " \"bool\",\n", " \"str\",\n", " \"bool\",\n", " \"bool\",\n", " \"str\",\n", "]\n", "NUMERIC_INTERVALS = {\n", " \"?\": pd.Interval(-1, 0, closed=\"right\"),\n", " \"~1.5\": pd.Interval(0, 2, closed=\"right\"),\n", " \"~3\": pd.Interval(2, 4, closed=\"right\"),\n", " \"~7\": pd.Interval(4, 9, closed=\"right\"),\n", " \"~13\": pd.Interval(9, 20, closed=\"right\"),\n", " \"~35\": pd.Interval(20, 45, closed=\"right\"),\n", " \"~60\": pd.Interval(45, 70, closed=\"right\"),\n", " \"70+\": pd.Interval(70, 10000, closed=\"right\"),\n", "}\n", "MODEL_TYPE = [str(s) for s in df[\"T\"].unique()]\n", "Precision = [str(s) for s in df[\"Precision\"].unique()]\n", "\n", "# Searching and filtering\n", "def update_table(\n", " hidden_df: pd.DataFrame,\n", " columns: list,\n", " type_query: list,\n", " precision_query: str,\n", " size_query: list,\n", " query: str,\n", "):\n", " filtered_df = filter_models(hidden_df, type_query, size_query, precision_query) # type: ignore\n", " filtered_df = filter_queries(query, filtered_df)\n", " df = select_columns(filtered_df, columns)\n", " return df\n", "\n", "def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:\n", " return df[(df[\"model_name_for_query\"].str.contains(query, case=False))] # type: ignore\n", "\n", "def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:\n", " # We use COLS to maintain sorting\n", " filtered_df = df[[c for c in COLS if c in df.columns and c in columns]]\n", " return filtered_df # type: ignore\n", "\n", "def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:\n", " final_df = []\n", " if query != \"\":\n", " queries = [q.strip() for q in query.split(\";\")]\n", " for _q in queries:\n", " _q = _q.strip()\n", " if _q != \"\":\n", " temp_filtered_df = search_table(filtered_df, _q)\n", " if len(temp_filtered_df) > 0:\n", " 
final_df.append(temp_filtered_df)\n", " if len(final_df) > 0:\n", " filtered_df = pd.concat(final_df)\n", " filtered_df = filtered_df.drop_duplicates( # type: ignore\n", " subset=[\"Model\", \"Precision\", \"Model sha\"]\n", " )\n", "\n", " return filtered_df\n", "\n", "def filter_models(\n", " df: pd.DataFrame,\n", " type_query: list,\n", " size_query: list,\n", " precision_query: list,\n", ") -> pd.DataFrame:\n", " # Show all models\n", " filtered_df = df\n", "\n", " type_emoji = [t[0] for t in type_query]\n", " filtered_df = filtered_df.loc[df[\"T\"].isin(type_emoji)]\n", " filtered_df = filtered_df.loc[df[\"Precision\"].isin(precision_query + [\"None\"])]\n", "\n", " numeric_interval = pd.IntervalIndex(\n", " sorted([NUMERIC_INTERVALS[s] for s in size_query]) # type: ignore\n", " )\n", " params_column = pd.to_numeric(df[\"#Params (B)\"], errors=\"coerce\")\n", " mask = params_column.apply(lambda x: any(numeric_interval.contains(x))) # type: ignore\n", " filtered_df = filtered_df.loc[mask]\n", "\n", " return filtered_df\n", "\n", "demo = gr.Blocks(css=str(abs_path / \"assets/leaderboard_data.json\"))\n", "with demo:\n", " gr.Markdown(\"\"\"Test Space of the LLM Leaderboard\"\"\", elem_classes=\"markdown-text\")\n", "\n", " with gr.Tabs(elem_classes=\"tab-buttons\") as tabs:\n", " with gr.TabItem(\"\ud83c\udfc5 LLM Benchmark\", elem_id=\"llm-benchmark-tab-table\", id=0):\n", " with gr.Row():\n", " with gr.Column():\n", " with gr.Row():\n", " search_bar = gr.Textbox(\n", " placeholder=\" \ud83d\udd0d Search for your model (separate multiple queries with `;`) and press ENTER...\",\n", " show_label=False,\n", " elem_id=\"search-bar\",\n", " )\n", " with gr.Row():\n", " shown_columns = gr.CheckboxGroup(\n", " choices=COLS,\n", " value=ON_LOAD_COLS,\n", " label=\"Select columns to show\",\n", " elem_id=\"column-select\",\n", " interactive=True,\n", " )\n", " with gr.Column(min_width=320):\n", " filter_columns_type = gr.CheckboxGroup(\n", " label=\"Model types\",\n", " 
choices=MODEL_TYPE,\n", " value=MODEL_TYPE,\n", " interactive=True,\n", " elem_id=\"filter-columns-type\",\n", " )\n", " filter_columns_precision = gr.CheckboxGroup(\n", " label=\"Precision\",\n", " choices=Precision,\n", " value=Precision,\n", " interactive=True,\n", " elem_id=\"filter-columns-precision\",\n", " )\n", " filter_columns_size = gr.CheckboxGroup(\n", " label=\"Model sizes (in billions of parameters)\",\n", " choices=list(NUMERIC_INTERVALS.keys()),\n", " value=list(NUMERIC_INTERVALS.keys()),\n", " interactive=True,\n", " elem_id=\"filter-columns-size\",\n", " )\n", "\n", " leaderboard_table = gr.components.Dataframe(\n", " value=df[ON_LOAD_COLS], # type: ignore\n", " headers=ON_LOAD_COLS,\n", " datatype=TYPES,\n", " elem_id=\"leaderboard-table\",\n", " interactive=False,\n", " visible=True,\n", " column_widths=[\"2%\", \"33%\"],\n", " )\n", "\n", " # Dummy leaderboard for handling the case when the user uses backspace key\n", " hidden_leaderboard_table_for_search = gr.components.Dataframe(\n", " value=invisible_df[COLS], # type: ignore\n", " headers=COLS,\n", " datatype=TYPES,\n", " visible=False,\n", " )\n", " search_bar.submit(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " )\n", " for selector in [\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " ]:\n", " selector.change(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " queue=True,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(default_concurrency_limit=40).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of 
file diff --git a/demo/native_plots/requirements.txt b/demo/native_plots/requirements.txt index d1c8a7ae0396d..fe1f1ac6ca525 100644 --- a/demo/native_plots/requirements.txt +++ b/demo/native_plots/requirements.txt @@ -1 +1,2 @@ -vega_datasets \ No newline at end of file +vega_datasets +pandas diff --git a/demo/native_plots/run.ipynb b/demo/native_plots/run.ipynb index 19d02266ed36a..5c47d25801eb3 100644 --- a/demo/native_plots/run.ipynb +++ b/demo/native_plots/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: native_plots"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/bar_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/data.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/line_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/scatter_plot_demo.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from scatter_plot_demo import scatter_plots\n", "from line_plot_demo import line_plots\n", "from bar_plot_demo import bar_plots\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tabs():\n", " with gr.TabItem(\"Line Plot\"):\n", " line_plots.render()\n", " with gr.TabItem(\"Scatter Plot\"):\n", " scatter_plots.render()\n", " with gr.TabItem(\"Bar Plot\"):\n", " bar_plots.render()\n", "\n", "if __name__ == \"__main__\":\n", " 
demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: native_plots"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/bar_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/data.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/line_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/scatter_plot_demo.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from scatter_plot_demo import scatter_plots\n", "from line_plot_demo import line_plots\n", "from bar_plot_demo import bar_plots\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tabs():\n", " with gr.TabItem(\"Line Plot\"):\n", " line_plots.render()\n", " with gr.TabItem(\"Scatter Plot\"):\n", " scatter_plots.render()\n", " with gr.TabItem(\"Bar Plot\"):\n", " bar_plots.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/outbreak_forecast/requirements.txt b/demo/outbreak_forecast/requirements.txt index 5615a533fc386..f513b41b84d2c 100644 --- a/demo/outbreak_forecast/requirements.txt +++ b/demo/outbreak_forecast/requirements.txt @@ -2,4 +2,4 @@ numpy matplotlib bokeh plotly -altair \ No 
newline at end of file +altair diff --git a/demo/outbreak_forecast/run.ipynb b/demo/outbreak_forecast/run.ipynb index 1ec9538ecdb55..48151f13b9897 100644 --- a/demo/outbreak_forecast/run.ipynb +++ b/demo/outbreak_forecast/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: outbreak_forecast\n", "### Generate a plot based on 5 inputs.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy matplotlib bokeh plotly altair"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import altair\n", "\n", "import gradio as gr\n", "from math import sqrt\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import plotly.express as px\n", "import pandas as pd\n", "\n", "def outbreak(plot_type, r, month, countries, social_distancing):\n", " months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n", " m = months.index(month)\n", " start_day = 30 * m\n", " final_day = 30 * (m + 1)\n", " x = np.arange(start_day, final_day + 1)\n", " pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n", " if social_distancing:\n", " r = sqrt(r)\n", " df = pd.DataFrame({\"day\": x})\n", " for country in countries:\n", " df[country] = x ** (r) * (pop_count[country] + 1)\n", "\n", " if plot_type == \"Matplotlib\":\n", " fig = plt.figure()\n", " plt.plot(df[\"day\"], df[countries].to_numpy())\n", " plt.title(\"Outbreak in \" + month)\n", " plt.ylabel(\"Cases\")\n", " plt.xlabel(\"Days since Day 0\")\n", " plt.legend(countries)\n", " return fig\n", " elif plot_type == \"Plotly\":\n", " fig = px.line(df, x=\"day\", y=countries)\n", " fig.update_layout(\n", " title=\"Outbreak in \" + month,\n", " xaxis_title=\"Cases\",\n", " yaxis_title=\"Days Since 
Day 0\",\n", " )\n", " return fig\n", " elif plot_type == \"Altair\":\n", " df = df.melt(id_vars=\"day\").rename(columns={\"variable\": \"country\"})\n", " fig = altair.Chart(df).mark_line().encode(x=\"day\", y=\"value\", color=\"country\")\n", " return fig\n", " else:\n", " raise ValueError(\"A plot type must be selected\")\n", "\n", "inputs = [\n", " gr.Dropdown([\"Matplotlib\", \"Plotly\", \"Altair\"], label=\"Plot Type\"),\n", " gr.Slider(1, 4, 3.2, label=\"R\"),\n", " gr.Dropdown([\"January\", \"February\", \"March\", \"April\", \"May\"], label=\"Month\"),\n", " gr.CheckboxGroup(\n", " [\"USA\", \"Canada\", \"Mexico\", \"UK\"], label=\"Countries\", value=[\"USA\", \"Canada\"]\n", " ),\n", " gr.Checkbox(label=\"Social Distancing?\"),\n", "]\n", "outputs = gr.Plot()\n", "\n", "demo = gr.Interface(\n", " fn=outbreak,\n", " inputs=inputs,\n", " outputs=outputs,\n", " examples=[\n", " [\"Matplotlib\", 2, \"March\", [\"Mexico\", \"UK\"], True],\n", " [\"Altair\", 2, \"March\", [\"Mexico\", \"Canada\"], True],\n", " [\"Plotly\", 3.6, \"February\", [\"Canada\", \"Mexico\", \"UK\"], False],\n", " ],\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: outbreak_forecast\n", "### Generate a plot based on 5 inputs.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy matplotlib bokeh plotly altair "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from math import sqrt\n", "import numpy as np\n", "import pandas as pd\n", "\n", "def outbreak(plot_type, r, month, countries, 
social_distancing):\n", " months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n", " m = months.index(month)\n", " start_day = 30 * m\n", " final_day = 30 * (m + 1)\n", " x = np.arange(start_day, final_day + 1)\n", " pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n", " if social_distancing:\n", " r = sqrt(r)\n", " df = pd.DataFrame({\"day\": x})\n", " for country in countries:\n", " df[country] = x ** (r) * (pop_count[country] + 1)\n", "\n", " if plot_type == \"Matplotlib\":\n", " import matplotlib.pyplot as plt\n", "\n", " fig = plt.figure()\n", " plt.plot(df[\"day\"], df[countries].to_numpy())\n", " plt.title(\"Outbreak in \" + month)\n", " plt.ylabel(\"Cases\")\n", " plt.xlabel(\"Days since Day 0\")\n", " plt.legend(countries)\n", " return fig\n", " elif plot_type == \"Plotly\":\n", " import plotly.express as px\n", "\n", " fig = px.line(df, x=\"day\", y=countries)\n", " fig.update_layout(\n", " title=\"Outbreak in \" + month,\n", " xaxis_title=\"Cases\",\n", " yaxis_title=\"Days Since Day 0\",\n", " )\n", " return fig\n", " elif plot_type == \"Altair\":\n", " import altair\n", "\n", " df = df.melt(id_vars=\"day\").rename(columns={\"variable\": \"country\"})\n", " fig = altair.Chart(df).mark_line().encode(x=\"day\", y=\"value\", color=\"country\")\n", " return fig\n", " elif plot_type == \"Bokeh\":\n", " from bokeh.plotting import figure\n", " from bokeh.models import ColumnDataSource\n", "\n", " source = ColumnDataSource(df)\n", " fig = figure(title=\"Outbreak in \" + month, x_axis_label=\"Days since Day 0\", y_axis_label=\"Cases\")\n", " for country in countries:\n", " fig.line(\"day\", country, source=source, legend_label=country)\n", " return fig\n", " else:\n", " raise ValueError(\"A plot type must be selected\")\n", "\n", "inputs = [\n", " gr.Dropdown([\"Matplotlib\", \"Plotly\", \"Altair\", \"Bokeh\"], label=\"Plot Type\", value=\"Matplotlib\"),\n", " gr.Slider(1, 4, 3.2, label=\"R\"),\n", " 
gr.Dropdown([\"January\", \"February\", \"March\", \"April\", \"May\"], label=\"Month\", value=\"March\"),\n", " gr.CheckboxGroup(\n", " [\"USA\", \"Canada\", \"Mexico\", \"UK\"], label=\"Countries\", value=[\"USA\", \"Canada\"]\n", " ),\n", " gr.Checkbox(label=\"Social Distancing?\"),\n", "]\n", "outputs = gr.Plot()\n", "\n", "demo = gr.Interface(\n", " fn=outbreak,\n", " inputs=inputs,\n", " outputs=outputs,\n", " examples=[\n", " [\"Matplotlib\", 2, \"March\", [\"Mexico\", \"UK\"], True],\n", " [\"Altair\", 2, \"March\", [\"Mexico\", \"Canada\"], True],\n", " [\"Plotly\", 3.6, \"February\", [\"Canada\", \"Mexico\", \"UK\"], False],\n", " [\"Bokeh\", 3.2, \"April\", [\"Canada\", \"UK\"], False],\n", " ],\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/outbreak_forecast/run.py b/demo/outbreak_forecast/run.py index 1802e5766a6ef..797479db7999f 100644 --- a/demo/outbreak_forecast/run.py +++ b/demo/outbreak_forecast/run.py @@ -1,10 +1,6 @@ -import altair - import gradio as gr from math import sqrt -import matplotlib.pyplot as plt import numpy as np -import plotly.express as px import pandas as pd def outbreak(plot_type, r, month, countries, social_distancing): @@ -21,6 +17,8 @@ def outbreak(plot_type, r, month, countries, social_distancing): df[country] = x ** (r) * (pop_count[country] + 1) if plot_type == "Matplotlib": + import matplotlib.pyplot as plt + fig = plt.figure() plt.plot(df["day"], df[countries].to_numpy()) plt.title("Outbreak in " + month) @@ -29,6 +27,8 @@ def outbreak(plot_type, r, month, countries, social_distancing): plt.legend(countries) return fig elif plot_type == "Plotly": + import plotly.express as px + fig = px.line(df, x="day", y=countries) fig.update_layout( title="Outbreak in " + month, @@ -37,16 +37,27 @@ def outbreak(plot_type, r, month, countries, social_distancing): ) return fig elif 
plot_type == "Altair": + import altair + df = df.melt(id_vars="day").rename(columns={"variable": "country"}) fig = altair.Chart(df).mark_line().encode(x="day", y="value", color="country") return fig + elif plot_type == "Bokeh": + from bokeh.plotting import figure + from bokeh.models import ColumnDataSource + + source = ColumnDataSource(df) + fig = figure(title="Outbreak in " + month, x_axis_label="Days since Day 0", y_axis_label="Cases") + for country in countries: + fig.line("day", country, source=source, legend_label=country) + return fig else: raise ValueError("A plot type must be selected") inputs = [ - gr.Dropdown(["Matplotlib", "Plotly", "Altair"], label="Plot Type"), + gr.Dropdown(["Matplotlib", "Plotly", "Altair", "Bokeh"], label="Plot Type", value="Matplotlib"), gr.Slider(1, 4, 3.2, label="R"), - gr.Dropdown(["January", "February", "March", "April", "May"], label="Month"), + gr.Dropdown(["January", "February", "March", "April", "May"], label="Month", value="March"), gr.CheckboxGroup( ["USA", "Canada", "Mexico", "UK"], label="Countries", value=["USA", "Canada"] ), @@ -62,6 +73,7 @@ def outbreak(plot_type, r, month, countries, social_distancing): ["Matplotlib", 2, "March", ["Mexico", "UK"], True], ["Altair", 2, "March", ["Mexico", "Canada"], True], ["Plotly", 3.6, "February", ["Canada", "Mexico", "UK"], False], + ["Bokeh", 3.2, "April", ["Canada", "UK"], False], ], cache_examples=True, ) diff --git a/demo/plot_guide_line/requirements.txt b/demo/plot_guide_line/requirements.txt new file mode 100644 index 0000000000000..5da331cf67f41 --- /dev/null +++ b/demo/plot_guide_line/requirements.txt @@ -0,0 +1,2 @@ +numpy +pandas diff --git a/demo/plot_guide_line/run.ipynb b/demo/plot_guide_line/run.ipynb index a760543134e43..2e6209707a91b 100644 --- a/demo/plot_guide_line/run.ipynb +++ b/demo/plot_guide_line/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: 
plot_guide_line"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import numpy as np\n", "import random\n", "\n", "df = pd.DataFrame({\n", " 'height': np.random.randint(50, 70, 25),\n", " 'weight': np.random.randint(120, 320, 25),\n", " 'age': np.random.randint(18, 65, 25),\n", " 'ethnicity': [random.choice([\"white\", \"black\", \"asian\"]) for _ in range(25)]\n", "})\n", "\n", "with gr.Blocks() as demo:\n", " gr.LinePlot(df, x=\"weight\", y=\"height\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: plot_guide_line"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import numpy as np\n", "import random\n", "\n", "df = pd.DataFrame({\n", " 'height': np.random.randint(50, 70, 25),\n", " 'weight': np.random.randint(120, 320, 25),\n", " 'age': np.random.randint(18, 65, 25),\n", " 'ethnicity': [random.choice([\"white\", \"black\", \"asian\"]) for _ in range(25)]\n", "})\n", "\n", "with gr.Blocks() as demo:\n", " gr.LinePlot(df, x=\"weight\", y=\"height\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git 
a/demo/plot_guide_temporal/requirements.txt b/demo/plot_guide_temporal/requirements.txt new file mode 100644 index 0000000000000..5da331cf67f41 --- /dev/null +++ b/demo/plot_guide_temporal/requirements.txt @@ -0,0 +1,2 @@ +numpy +pandas diff --git a/demo/plot_guide_temporal/run.ipynb b/demo/plot_guide_temporal/run.ipynb index fac5b4d1f0be3..8778388fdf9e5 100644 --- a/demo/plot_guide_temporal/run.ipynb +++ b/demo/plot_guide_temporal/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: plot_guide_temporal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import numpy as np\n", "import random\n", "\n", "from datetime import datetime, timedelta\n", "now = datetime.now()\n", "\n", "df = pd.DataFrame({\n", " 'time': [now - timedelta(minutes=5*i) for i in range(25)],\n", " 'price': np.random.randint(100, 1000, 25),\n", " 'origin': [random.choice([\"DFW\", \"DAL\", \"HOU\"]) for _ in range(25)],\n", " 'destination': [random.choice([\"JFK\", \"LGA\", \"EWR\"]) for _ in range(25)],\n", "})\n", "\n", "with gr.Blocks() as demo:\n", " gr.LinePlot(df, x=\"time\", y=\"price\")\n", " gr.ScatterPlot(df, x=\"time\", y=\"price\", color=\"origin\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: plot_guide_temporal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip 
install -q gradio numpy pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import numpy as np\n", "import random\n", "\n", "from datetime import datetime, timedelta\n", "now = datetime.now()\n", "\n", "df = pd.DataFrame({\n", " 'time': [now - timedelta(minutes=5*i) for i in range(25)],\n", " 'price': np.random.randint(100, 1000, 25),\n", " 'origin': [random.choice([\"DFW\", \"DAL\", \"HOU\"]) for _ in range(25)],\n", " 'destination': [random.choice([\"JFK\", \"LGA\", \"EWR\"]) for _ in range(25)],\n", "})\n", "\n", "with gr.Blocks() as demo:\n", " gr.LinePlot(df, x=\"time\", y=\"price\")\n", " gr.ScatterPlot(df, x=\"time\", y=\"price\", color=\"origin\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/random_demos.py b/demo/random_demos.py index 58b93a90a6c22..c3cccde454378 100644 --- a/demo/random_demos.py +++ b/demo/random_demos.py @@ -26,7 +26,7 @@ # Some demos are just too large or need to be run in a special way, so we'll just skip them demos_list.remove('streaming_wav2vec') demos_list.remove('blocks_neural_instrument_coding') -demos_list.remove('flagged') +demos_list.remove('.gradio/flagged') for d, demo_name in enumerate(random.sample(demos_list, args.num_demos)): print(f"Launching demo {d+1}/{args.num_demos}: {demo_name}") diff --git a/demo/reverse_audio/requirements.txt b/demo/reverse_audio/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/reverse_audio/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/reverse_audio/run.ipynb b/demo/reverse_audio/run.ipynb index 464bab62c96ca..61b1da80519a6 100644 --- a/demo/reverse_audio/run.ipynb +++ b/demo/reverse_audio/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": 
"302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: reverse_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/cantina.wav\n", "!wget -q -O audio/recording1.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/recording1.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "def reverse_audio(audio):\n", " sr, data = audio\n", " return (sr, np.flipud(data))\n", "\n", "input_audio = gr.Audio(\n", " sources=[\"microphone\"],\n", " waveform_options=gr.WaveformOptions(\n", " waveform_color=\"#01C6FF\",\n", " waveform_progress_color=\"#0066B4\",\n", " skip_length=2,\n", " show_controls=False,\n", " ),\n", ")\n", "demo = gr.Interface(\n", " fn=reverse_audio,\n", " inputs=input_audio,\n", " outputs=\"audio\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: reverse_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# 
Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/cantina.wav\n", "!wget -q -O audio/recording1.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/recording1.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "def reverse_audio(audio):\n", " sr, data = audio\n", " return (sr, np.flipud(data))\n", "\n", "input_audio = gr.Audio(\n", " sources=[\"microphone\"],\n", " waveform_options=gr.WaveformOptions(\n", " waveform_color=\"#01C6FF\",\n", " waveform_progress_color=\"#0066B4\",\n", " skip_length=2,\n", " show_controls=False,\n", " ),\n", ")\n", "demo = gr.Interface(\n", " fn=reverse_audio,\n", " inputs=input_audio,\n", " outputs=\"audio\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/rt-detr-object-detection/draw_boxes.py b/demo/rt-detr-object-detection/draw_boxes.py new file mode 100644 index 0000000000000..9a0442e6b5ead --- /dev/null +++ b/demo/rt-detr-object-detection/draw_boxes.py @@ -0,0 +1,45 @@ +from PIL import ImageDraw, ImageFont # type: ignore +import colorsys + + +def get_color(label): + # Simple hash function to generate consistent colors for each label + hash_value = hash(label) + hue = (hash_value % 100) / 100.0 + saturation = 0.7 + value = 0.9 + rgb = colorsys.hsv_to_rgb(hue, saturation, value) + return tuple(int(x * 255) for x in rgb) + + +def draw_bounding_boxes(image, results: dict, model, threshold=0.3): + draw = ImageDraw.Draw(image) + font = ImageFont.load_default() + + for score, label_id, box in zip( + results["scores"], results["labels"], results["boxes"] + ): + if score > threshold: + label = 
model.config.id2label[label_id.item()] + box = [round(i, 2) for i in box.tolist()] + color = get_color(label) + + # Draw bounding box + draw.rectangle(box, outline=color, width=3) # type: ignore + + # Prepare text + text = f"{label}: {score:.2f}" + text_bbox = draw.textbbox((0, 0), text, font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + # Draw text background + draw.rectangle( + [box[0], box[1] - text_height - 4, box[0] + text_width, box[1]], # type: ignore + fill=color, # type: ignore + ) + + # Draw text + draw.text((box[0], box[1] - text_height - 4), text, fill="white", font=font) + + return image diff --git a/demo/rt-detr-object-detection/requirements.txt b/demo/rt-detr-object-detection/requirements.txt new file mode 100644 index 0000000000000..b1c8b943d9230 --- /dev/null +++ b/demo/rt-detr-object-detection/requirements.txt @@ -0,0 +1,5 @@ +safetensors==0.4.3 +opencv-python +torch +transformers>=4.43.0 +Pillow diff --git a/demo/rt-detr-object-detection/run.ipynb b/demo/rt-detr-object-detection/run.ipynb new file mode 100644 index 0000000000000..9b629830e7b52 --- /dev/null +++ b/demo/rt-detr-object-detection/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: rt-detr-object-detection"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio safetensors==0.4.3 opencv-python torch transformers>=4.43.0 Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/rt-detr-object-detection/draw_boxes.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", 
"metadata": {}, "outputs": [], "source": ["import spaces\n", "import gradio as gr\n", "import cv2\n", "from PIL import Image\n", "import torch\n", "import time\n", "import numpy as np\n", "import uuid\n", "\n", "from transformers import RTDetrForObjectDetection, RTDetrImageProcessor # type: ignore\n", "\n", "from draw_boxes import draw_bounding_boxes\n", "\n", "image_processor = RTDetrImageProcessor.from_pretrained(\"PekingU/rtdetr_r50vd\")\n", "model = RTDetrForObjectDetection.from_pretrained(\"PekingU/rtdetr_r50vd\").to(\"cuda\")\n", "\n", "\n", "SUBSAMPLE = 2\n", "\n", "\n", "@spaces.GPU\n", "def stream_object_detection(video, conf_threshold):\n", " cap = cv2.VideoCapture(video)\n", "\n", " video_codec = cv2.VideoWriter_fourcc(*\"mp4v\") # type: ignore\n", " fps = int(cap.get(cv2.CAP_PROP_FPS))\n", "\n", " desired_fps = fps // SUBSAMPLE\n", " width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) // 2\n", " height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) // 2\n", "\n", " iterating, frame = cap.read()\n", "\n", " n_frames = 0\n", "\n", " name = f\"output_{uuid.uuid4()}.mp4\"\n", " segment_file = cv2.VideoWriter(name, video_codec, desired_fps, (width, height)) # type: ignore\n", " batch = []\n", "\n", " while iterating:\n", " frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n", " frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", " if n_frames % SUBSAMPLE == 0:\n", " batch.append(frame)\n", " if len(batch) == 2 * desired_fps:\n", " inputs = image_processor(images=batch, return_tensors=\"pt\").to(\"cuda\")\n", "\n", " print(f\"starting batch of size {len(batch)}\")\n", " start = time.time()\n", " with torch.no_grad():\n", " outputs = model(**inputs)\n", " end = time.time()\n", " print(\"time taken for inference\", end - start)\n", "\n", " start = time.time()\n", " boxes = image_processor.post_process_object_detection(\n", " outputs,\n", " target_sizes=torch.tensor([(height, width)] * len(batch)),\n", " threshold=conf_threshold,\n", " )\n", "\n", " for _, (array, box) in 
enumerate(zip(batch, boxes)):\n", " pil_image = draw_bounding_boxes(\n", " Image.fromarray(array), box, model, conf_threshold\n", " )\n", " frame = np.array(pil_image)\n", " # Convert RGB to BGR\n", " frame = frame[:, :, ::-1].copy()\n", " segment_file.write(frame)\n", "\n", " batch = []\n", " segment_file.release()\n", " yield name\n", " end = time.time()\n", " print(\"time taken for processing boxes\", end - start)\n", " name = f\"output_{uuid.uuid4()}.mp4\"\n", " segment_file = cv2.VideoWriter(\n", " name, video_codec, desired_fps, (width, height)\n", " ) # type: ignore\n", "\n", " iterating, frame = cap.read()\n", " n_frames += 1\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.HTML(\n", " \"\"\"\n", "

\n", " Video Object Detection with RT-DETR\n", "

\n", " \"\"\"\n", " )\n", " with gr.Row():\n", " with gr.Column():\n", " video = gr.Video(label=\"Video Source\")\n", " conf_threshold = gr.Slider(\n", " label=\"Confidence Threshold\",\n", " minimum=0.0,\n", " maximum=1.0,\n", " step=0.05,\n", " value=0.30,\n", " )\n", " with gr.Column():\n", " output_video = gr.Video(\n", " label=\"Processed Video\", streaming=True, autoplay=True\n", " )\n", "\n", " video.upload(\n", " fn=stream_object_detection,\n", " inputs=[video, conf_threshold],\n", " outputs=[output_video],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/rt-detr-object-detection/run.py b/demo/rt-detr-object-detection/run.py new file mode 100644 index 0000000000000..ec089664009ed --- /dev/null +++ b/demo/rt-detr-object-detection/run.py @@ -0,0 +1,115 @@ +import spaces +import gradio as gr +import cv2 +from PIL import Image +import torch +import time +import numpy as np +import uuid + +from transformers import RTDetrForObjectDetection, RTDetrImageProcessor # type: ignore + +from draw_boxes import draw_bounding_boxes + +image_processor = RTDetrImageProcessor.from_pretrained("PekingU/rtdetr_r50vd") +model = RTDetrForObjectDetection.from_pretrained("PekingU/rtdetr_r50vd").to("cuda") + + +SUBSAMPLE = 2 + + +@spaces.GPU +def stream_object_detection(video, conf_threshold): + cap = cv2.VideoCapture(video) + + video_codec = cv2.VideoWriter_fourcc(*"mp4v") # type: ignore + fps = int(cap.get(cv2.CAP_PROP_FPS)) + + desired_fps = fps // SUBSAMPLE + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) // 2 + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) // 2 + + iterating, frame = cap.read() + + n_frames = 0 + + name = f"output_{uuid.uuid4()}.mp4" + segment_file = cv2.VideoWriter(name, video_codec, desired_fps, (width, height)) # type: ignore + batch = [] + + while iterating: + frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5) + frame = 
cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + if n_frames % SUBSAMPLE == 0: + batch.append(frame) + if len(batch) == 2 * desired_fps: + inputs = image_processor(images=batch, return_tensors="pt").to("cuda") + + print(f"starting batch of size {len(batch)}") + start = time.time() + with torch.no_grad(): + outputs = model(**inputs) + end = time.time() + print("time taken for inference", end - start) + + start = time.time() + boxes = image_processor.post_process_object_detection( + outputs, + target_sizes=torch.tensor([(height, width)] * len(batch)), + threshold=conf_threshold, + ) + + for _, (array, box) in enumerate(zip(batch, boxes)): + pil_image = draw_bounding_boxes( + Image.fromarray(array), box, model, conf_threshold + ) + frame = np.array(pil_image) + # Convert RGB to BGR + frame = frame[:, :, ::-1].copy() + segment_file.write(frame) + + batch = [] + segment_file.release() + yield name + end = time.time() + print("time taken for processing boxes", end - start) + name = f"output_{uuid.uuid4()}.mp4" + segment_file = cv2.VideoWriter( + name, video_codec, desired_fps, (width, height) + ) # type: ignore + + iterating, frame = cap.read() + n_frames += 1 + + +with gr.Blocks() as demo: + gr.HTML( + """ +

+ Video Object Detection with RT-DETR +

+ """ + ) + with gr.Row(): + with gr.Column(): + video = gr.Video(label="Video Source") + conf_threshold = gr.Slider( + label="Confidence Threshold", + minimum=0.0, + maximum=1.0, + step=0.05, + value=0.30, + ) + with gr.Column(): + output_video = gr.Video( + label="Processed Video", streaming=True, autoplay=True + ) + + video.upload( + fn=stream_object_detection, + inputs=[video, conf_threshold], + outputs=[output_video], + ) + +if __name__ == "__main__": + demo.launch() diff --git a/demo/same-person-or-different/run.ipynb b/demo/same-person-or-different/run.ipynb index a514212e239af..39002fc6ead6c 100644 --- a/demo/same-person-or-different/run.ipynb +++ b/demo/same-person-or-different/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: same-person-or-different\n", "### This demo identifies if two speakers are the same person using Gradio's Audio and HTML components.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers torchaudio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/packages.txt\n", "os.mkdir('samples')\n", "!wget -q -O samples/cate_blanch.mp3 https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/samples/cate_blanch.mp3\n", "!wget -q -O samples/cate_blanch_2.mp3 https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/samples/cate_blanch_2.mp3\n", "!wget -q -O samples/heath_ledger.mp3 https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/samples/heath_ledger.mp3"]}, {"cell_type": "code", 
"execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from torchaudio.sox_effects import apply_effects_file\n", "from transformers import AutoFeatureExtractor, AutoModelForAudioXVector\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "OUTPUT_OK = (\n", " \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

Welcome, human!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "OUTPUT_FAIL = (\n", " \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

You shall not pass!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "\n", "EFFECTS = [\n", " [\"remix\", \"-\"],\n", " [\"channels\", \"1\"],\n", " [\"rate\", \"16000\"],\n", " [\"gain\", \"-1.0\"],\n", " [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n", " [\"trim\", \"0\", \"10\"],\n", "]\n", "\n", "THRESHOLD = 0.85\n", "\n", "model_name = \"microsoft/unispeech-sat-base-plus-sv\"\n", "feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\n", "model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)\n", "cosine_sim = torch.nn.CosineSimilarity(dim=-1)\n", "\n", "def similarity_fn(path1, path2):\n", " if not (path1 and path2):\n", " return 'ERROR: Please record audio for *both* speakers!'\n", "\n", " wav1, _ = apply_effects_file(path1, EFFECTS)\n", " wav2, _ = apply_effects_file(path2, EFFECTS)\n", " print(wav1.shape, wav2.shape)\n", "\n", " input1 = feature_extractor(wav1.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", " input2 = feature_extractor(wav2.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", "\n", " with torch.no_grad():\n", " emb1 = model(input1).embeddings\n", " emb2 = model(input2).embeddings\n", " emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()\n", " emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()\n", " similarity = cosine_sim(emb1, emb2).numpy()[0]\n", "\n", " if similarity >= THRESHOLD:\n", " output = OUTPUT_OK.format(similarity * 100)\n", " else:\n", " output = OUTPUT_FAIL.format(similarity * 100)\n", "\n", " return output\n", "\n", "inputs = [\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #1\"),\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #2\"),\n", "]\n", "output = gr.HTML(label=\"\")\n", "\n", "description = (\n", " \"This demo from Microsoft will compare two speech samples and determine if they are from the same speaker. 
\"\n", " \"Try it with your own voice!\"\n", ")\n", "article = (\n", " \"

\"\n", " \"\ud83c\udf99\ufe0f Learn more about UniSpeech-SAT | \"\n", " \"\ud83d\udcda UniSpeech-SAT paper | \"\n", " \"\ud83d\udcda X-Vector paper\"\n", " \"

\"\n", ")\n", "examples = [\n", " [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_2.mp3\"],\n", " [\"samples/cate_blanch.mp3\", \"samples/heath_ledger.mp3\"],\n", "]\n", "\n", "interface = gr.Interface(\n", " fn=similarity_fn,\n", " inputs=inputs,\n", " outputs=output,\n", " layout=\"horizontal\",\n", " allow_flagging=\"never\",\n", " live=False,\n", " examples=examples,\n", " cache_examples=False\n", ")\n", "interface.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: same-person-or-different\n", "### This demo identifies if two speakers are the same person using Gradio's Audio and HTML components.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers torchaudio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/packages.txt\n", "os.mkdir('samples')\n", "!wget -q -O samples/cate_blanch.mp3 https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/samples/cate_blanch.mp3\n", "!wget -q -O samples/cate_blanch_2.mp3 https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/samples/cate_blanch_2.mp3\n", "!wget -q -O samples/heath_ledger.mp3 https://github.com/gradio-app/gradio/raw/main/demo/same-person-or-different/samples/heath_ledger.mp3"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from torchaudio.sox_effects import 
apply_effects_file\n", "from transformers import AutoFeatureExtractor, AutoModelForAudioXVector\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "OUTPUT_OK = (\n", " \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

Welcome, human!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "OUTPUT_FAIL = (\n", " \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

You shall not pass!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "\n", "EFFECTS = [\n", " [\"remix\", \"-\"],\n", " [\"channels\", \"1\"],\n", " [\"rate\", \"16000\"],\n", " [\"gain\", \"-1.0\"],\n", " [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n", " [\"trim\", \"0\", \"10\"],\n", "]\n", "\n", "THRESHOLD = 0.85\n", "\n", "model_name = \"microsoft/unispeech-sat-base-plus-sv\"\n", "feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\n", "model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)\n", "cosine_sim = torch.nn.CosineSimilarity(dim=-1)\n", "\n", "def similarity_fn(path1, path2):\n", " if not (path1 and path2):\n", " return 'ERROR: Please record audio for *both* speakers!'\n", "\n", " wav1, _ = apply_effects_file(path1, EFFECTS)\n", " wav2, _ = apply_effects_file(path2, EFFECTS)\n", " print(wav1.shape, wav2.shape)\n", "\n", " input1 = feature_extractor(wav1.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", " input2 = feature_extractor(wav2.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", "\n", " with torch.no_grad():\n", " emb1 = model(input1).embeddings\n", " emb2 = model(input2).embeddings\n", " emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()\n", " emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()\n", " similarity = cosine_sim(emb1, emb2).numpy()[0]\n", "\n", " if similarity >= THRESHOLD:\n", " output = OUTPUT_OK.format(similarity * 100)\n", " else:\n", " output = OUTPUT_FAIL.format(similarity * 100)\n", "\n", " return output\n", "\n", "inputs = [\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #1\"),\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #2\"),\n", "]\n", "output = gr.HTML(label=\"\")\n", "\n", "description = (\n", " \"This demo from Microsoft will compare two speech samples and determine if they are from the same speaker. 
\"\n", " \"Try it with your own voice!\"\n", ")\n", "article = (\n", " \"

\"\n", " \"\ud83c\udf99\ufe0f Learn more about UniSpeech-SAT | \"\n", " \"\ud83d\udcda UniSpeech-SAT paper | \"\n", " \"\ud83d\udcda X-Vector paper\"\n", " \"

\"\n", ")\n", "examples = [\n", " [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_2.mp3\"],\n", " [\"samples/cate_blanch.mp3\", \"samples/heath_ledger.mp3\"],\n", "]\n", "\n", "interface = gr.Interface(\n", " fn=similarity_fn,\n", " inputs=inputs,\n", " outputs=output,\n", " layout=\"horizontal\",\n", " flagging_mode=\"never\",\n", " live=False,\n", " examples=examples,\n", " cache_examples=False\n", ")\n", "interface.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/same-person-or-different/run.py b/demo/same-person-or-different/run.py index d9db14fe163ae..f2840db8bfb8d 100644 --- a/demo/same-person-or-different/run.py +++ b/demo/same-person-or-different/run.py @@ -95,7 +95,7 @@ def similarity_fn(path1, path2): inputs=inputs, outputs=output, layout="horizontal", - allow_flagging="never", + flagging_mode="never", live=False, examples=examples, cache_examples=False diff --git a/demo/scatter_plot_demo/requirements.txt b/demo/scatter_plot_demo/requirements.txt new file mode 100644 index 0000000000000..fb6c7ed7ec60d --- /dev/null +++ b/demo/scatter_plot_demo/requirements.txt @@ -0,0 +1 @@ +pandas diff --git a/demo/scatter_plot_demo/run.ipynb b/demo/scatter_plot_demo/run.ipynb index ae93530961d76..43f70c28f84d1 100644 --- a/demo/scatter_plot_demo/run.ipynb +++ b/demo/scatter_plot_demo/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: scatter_plot_demo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "from random import randint, random\n", "import gradio as gr\n", "\n", "\n", "temp_sensor_data = pd.DataFrame(\n", " {\n", " 
\"time\": pd.date_range(\"2021-01-01\", end=\"2021-01-05\", periods=200),\n", " \"temperature\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"humidity\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"location\": [\"indoor\", \"outdoor\"] * 100,\n", " }\n", ")\n", "\n", "food_rating_data = pd.DataFrame(\n", " {\n", " \"cuisine\": [[\"Italian\", \"Mexican\", \"Chinese\"][i % 3] for i in range(100)],\n", " \"rating\": [random() * 4 + 0.5 * (i % 3) for i in range(100)],\n", " \"price\": [randint(10, 50) + 4 * (i % 3) for i in range(100)],\n", " \"wait\": [random() for i in range(100)],\n", " }\n", ")\n", "\n", "with gr.Blocks() as scatter_plots:\n", " with gr.Row():\n", " start = gr.DateTime(\"2021-01-01 00:00:00\", label=\"Start\")\n", " end = gr.DateTime(\"2021-01-05 00:00:00\", label=\"End\")\n", " apply_btn = gr.Button(\"Apply\", scale=0)\n", " with gr.Row():\n", " group_by = gr.Radio([\"None\", \"30m\", \"1h\", \"4h\", \"1d\"], value=\"None\", label=\"Group by\")\n", " aggregate = gr.Radio([\"sum\", \"mean\", \"median\", \"min\", \"max\"], value=\"sum\", label=\"Aggregation\")\n", "\n", " temp_by_time = gr.ScatterPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " )\n", " temp_by_time_location = gr.ScatterPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " color=\"location\",\n", " )\n", "\n", " time_graphs = [temp_by_time, temp_by_time_location]\n", " group_by.change(\n", " lambda group: [gr.ScatterPlot(x_bin=None if group == \"None\" else group)] * len(time_graphs),\n", " group_by,\n", " time_graphs\n", " )\n", " aggregate.change(\n", " lambda aggregate: [gr.ScatterPlot(y_aggregate=aggregate)] * len(time_graphs),\n", " aggregate,\n", " time_graphs\n", " )\n", "\n", " price_by_cuisine = gr.ScatterPlot(\n", " food_rating_data,\n", " x=\"cuisine\",\n", " y=\"price\",\n", " )\n", " with gr.Row():\n", " price_by_rating = gr.ScatterPlot(\n", " 
food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " color=\"wait\",\n", " show_actions_button=True,\n", " )\n", " price_by_rating_color = gr.ScatterPlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " color=\"cuisine\",\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " scatter_plots.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: scatter_plot_demo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "from random import randint, random\n", "import gradio as gr\n", "\n", "\n", "temp_sensor_data = pd.DataFrame(\n", " {\n", " \"time\": pd.date_range(\"2021-01-01\", end=\"2021-01-05\", periods=200),\n", " \"temperature\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"humidity\": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],\n", " \"location\": [\"indoor\", \"outdoor\"] * 100,\n", " }\n", ")\n", "\n", "food_rating_data = pd.DataFrame(\n", " {\n", " \"cuisine\": [[\"Italian\", \"Mexican\", \"Chinese\"][i % 3] for i in range(100)],\n", " \"rating\": [random() * 4 + 0.5 * (i % 3) for i in range(100)],\n", " \"price\": [randint(10, 50) + 4 * (i % 3) for i in range(100)],\n", " \"wait\": [random() for i in range(100)],\n", " }\n", ")\n", "\n", "with gr.Blocks() as scatter_plots:\n", " with gr.Row():\n", " start = gr.DateTime(\"2021-01-01 00:00:00\", label=\"Start\")\n", " end = gr.DateTime(\"2021-01-05 00:00:00\", label=\"End\")\n", " apply_btn = gr.Button(\"Apply\", scale=0)\n", " with gr.Row():\n", " group_by = 
gr.Radio([\"None\", \"30m\", \"1h\", \"4h\", \"1d\"], value=\"None\", label=\"Group by\")\n", " aggregate = gr.Radio([\"sum\", \"mean\", \"median\", \"min\", \"max\"], value=\"sum\", label=\"Aggregation\")\n", "\n", " temp_by_time = gr.ScatterPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " )\n", " temp_by_time_location = gr.ScatterPlot(\n", " temp_sensor_data,\n", " x=\"time\",\n", " y=\"temperature\",\n", " color=\"location\",\n", " )\n", "\n", " time_graphs = [temp_by_time, temp_by_time_location]\n", " group_by.change(\n", " lambda group: [gr.ScatterPlot(x_bin=None if group == \"None\" else group)] * len(time_graphs),\n", " group_by,\n", " time_graphs\n", " )\n", " aggregate.change(\n", " lambda aggregate: [gr.ScatterPlot(y_aggregate=aggregate)] * len(time_graphs),\n", " aggregate,\n", " time_graphs\n", " )\n", "\n", " price_by_cuisine = gr.ScatterPlot(\n", " food_rating_data,\n", " x=\"cuisine\",\n", " y=\"price\",\n", " )\n", " with gr.Row():\n", " price_by_rating = gr.ScatterPlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " color=\"wait\",\n", " show_actions_button=True,\n", " )\n", " price_by_rating_color = gr.ScatterPlot(\n", " food_rating_data,\n", " x=\"rating\",\n", " y=\"price\",\n", " color=\"cuisine\",\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " scatter_plots.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/sepia_filter/requirements.txt b/demo/sepia_filter/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/sepia_filter/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/sepia_filter/run.ipynb b/demo/sepia_filter/run.ipynb index 606ca44c21125..a8745ad77c107 100644 --- a/demo/sepia_filter/run.ipynb +++ b/demo/sepia_filter/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: 
sepia_filter"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def sepia(input_img):\n", " sepia_filter = np.array([\n", " [0.393, 0.769, 0.189],\n", " [0.349, 0.686, 0.168],\n", " [0.272, 0.534, 0.131]\n", " ])\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "demo = gr.Interface(sepia, gr.Image(), \"image\")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sepia_filter"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def sepia(input_img):\n", " sepia_filter = np.array([\n", " [0.393, 0.769, 0.189],\n", " [0.349, 0.686, 0.168],\n", " [0.272, 0.534, 0.131]\n", " ])\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "demo = gr.Interface(sepia, gr.Image(), \"image\")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/simple_state/run.ipynb b/demo/simple_state/run.ipynb index 7d3e7943b78c5..39e90551f5e25 100644 --- a/demo/simple_state/run.ipynb +++ b/demo/simple_state/run.ipynb @@ -1 +1 @@ 
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: simple_state"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " cart = gr.State([])\n", " items_to_add = gr.CheckboxGroup([\"Cereal\", \"Milk\", \"Orange Juice\", \"Water\"])\n", "\n", " def add_items(new_items, previous_cart):\n", " cart = previous_cart + new_items\n", " return cart\n", "\n", " gr.Button(\"Add Items\").click(add_items, [items_to_add, cart], cart)\n", "\n", " cart_size = gr.Number(label=\"Cart Size\")\n", " gr.Button(\"Get Cart Size\").click(lambda cart: len(cart), cart, cart_size)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: simple_state"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " cart = gr.State([])\n", " items_to_add = gr.CheckboxGroup([\"Cereal\", \"Milk\", \"Orange Juice\", \"Water\"])\n", "\n", " def add_items(new_items, previous_cart):\n", " cart = previous_cart + new_items\n", " return cart\n", "\n", " gr.Button(\"Add Items\").click(add_items, [items_to_add, cart], cart)\n", "\n", " cart_size = gr.Number(label=\"Cart Size\")\n", " cart.change(lambda 
cart: len(cart), cart, cart_size)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/simple_state/run.py b/demo/simple_state/run.py index 4ab629a44a620..40836a8a65dfd 100644 --- a/demo/simple_state/run.py +++ b/demo/simple_state/run.py @@ -11,7 +11,7 @@ def add_items(new_items, previous_cart): gr.Button("Add Items").click(add_items, [items_to_add, cart], cart) cart_size = gr.Number(label="Cart Size") - gr.Button("Get Cart Size").click(lambda cart: len(cart), cart, cart_size) + cart.change(lambda cart: len(cart), cart, cart_size) if __name__ == "__main__": demo.launch() \ No newline at end of file diff --git a/demo/sine_curve/requirements.txt b/demo/sine_curve/requirements.txt index d42d0ad03bdf8..5c84b3c19f3fe 100644 --- a/demo/sine_curve/requirements.txt +++ b/demo/sine_curve/requirements.txt @@ -1 +1,2 @@ -plotly \ No newline at end of file +numpy +plotly diff --git a/demo/sine_curve/run.ipynb b/demo/sine_curve/run.ipynb index e9b8053904028..9b15230b75126 100644 --- a/demo/sine_curve/run.ipynb +++ b/demo/sine_curve/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sine_curve"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import math\n", "import gradio as gr\n", "import plotly.express as px\n", "import numpy as np\n", "\n", "plot_end = 2 * math.pi\n", "\n", "def get_plot(period=1):\n", " global plot_end\n", " x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n", " y = np.sin(2*math.pi*period * x)\n", " fig = px.line(x=x, y=y)\n", " plot_end += 2 * math.pi\n", " if plot_end > 1000:\n", " 
plot_end = 2 * math.pi\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n", " period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n", " plot = gr.Plot(label=\"Plot (updates every half second)\")\n", "\n", " dep = demo.load(get_plot, None, plot, every=1)\n", " period.change(get_plot, period, plot, every=1, cancels=[dep])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sine_curve"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy plotly "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import math\n", "import gradio as gr\n", "import plotly.express as px\n", "import numpy as np\n", "\n", "plot_end = 2 * math.pi\n", "\n", "def get_plot(period=1):\n", " global plot_end\n", " x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n", " y = np.sin(2*math.pi*period * x)\n", " fig = px.line(x=x, y=y)\n", " plot_end += 2 * math.pi\n", " if plot_end > 1000:\n", " plot_end = 2 * math.pi\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n", " period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n", " plot = gr.Plot(label=\"Plot (updates every half second)\")\n", "\n", " dep = demo.load(get_plot, None, plot, every=1)\n", " period.change(get_plot, period, plot, every=1, cancels=[dep])\n", "\n", "if __name__ == 
\"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/skip/run.ipynb b/demo/skip/run.ipynb new file mode 100644 index 0000000000000..23a083f79da5a --- /dev/null +++ b/demo/skip/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: skip"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import random\n", "import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " clear_button = gr.Button(\"Clear\")\n", " skip_button = gr.Button(\"Skip\")\n", " random_button = gr.Button(\"Random\")\n", " numbers = [gr.Number(), gr.Number()]\n", "\n", " clear_button.click(lambda : (None, None), outputs=numbers)\n", " skip_button.click(lambda : [gr.skip(), gr.skip()], outputs=numbers)\n", " random_button.click(lambda : (random.randint(0, 100), random.randint(0, 100)), outputs=numbers)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/skip/run.py b/demo/skip/run.py new file mode 100644 index 0000000000000..b43974b23e710 --- /dev/null +++ b/demo/skip/run.py @@ -0,0 +1,16 @@ +import random +import gradio as gr + +with gr.Blocks() as demo: + with gr.Row(): + clear_button = gr.Button("Clear") + skip_button = gr.Button("Skip") + random_button = gr.Button("Random") + numbers = [gr.Number(), gr.Number()] + + clear_button.click(lambda : (None, None), outputs=numbers) + skip_button.click(lambda : [gr.skip(), gr.skip()], outputs=numbers) + random_button.click(lambda : (random.randint(0, 100), random.randint(0, 100)), 
outputs=numbers) + +if __name__ == "__main__": + demo.launch() \ No newline at end of file diff --git a/demo/stable-diffusion/requirements.txt b/demo/stable-diffusion/requirements.txt index bb40240976b3d..432766cd6b34a 100644 --- a/demo/stable-diffusion/requirements.txt +++ b/demo/stable-diffusion/requirements.txt @@ -2,4 +2,5 @@ diffusers transformers nvidia-ml-py3 ftfy -torch \ No newline at end of file +torch +Pillow diff --git a/demo/stable-diffusion/run.ipynb b/demo/stable-diffusion/run.ipynb index 9b5296d6c8fd6..e27c0726af3e4 100644 --- a/demo/stable-diffusion/run.ipynb +++ b/demo/stable-diffusion/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline # type: ignore\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe( # type: ignore\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " 
guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]): # type: ignore\n", " if images_list[\"nsfw_content_detected\"][i]: # type: ignore\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. 
See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline # type: ignore\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe( # type: ignore\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]): # type: ignore\n", " if images_list[\"nsfw_content_detected\"][i]: # type: ignore\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", 
elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/state_change/run.ipynb b/demo/state_change/run.ipynb index 8b4d611579adb..b7d5eeff63502 100644 --- a/demo/state_change/run.ipynb +++ b/demo/state_change/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: state_change"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", "\n", " with gr.Row():\n", " state_a = gr.State(0)\n", " btn_a = gr.Button(\"Increment A\")\n", " value_a = gr.Number(label=\"Number A\")\n", " btn_a.click(lambda x: x + 1, state_a, state_a)\n", " state_a.change(lambda x: x, state_a, value_a)\n", " with gr.Row():\n", " state_b = gr.State(0)\n", " btn_b = gr.Button(\"Increment B\")\n", " value_b = gr.Number(label=\"Number B\")\n", " btn_b.click(lambda x: x + 1, state_b, state_b)\n", "\n", " @gr.on(inputs=state_b, outputs=value_b)\n", " def identity(x):\n", " 
return x\n", "\n", " @gr.render(inputs=[state_a, state_b])\n", " def render(a, b):\n", " for x in range(a):\n", " with gr.Row():\n", " for y in range(b):\n", " gr.Button(f\"Button {x}, {y}\")\n", "\n", " list_state = gr.State([])\n", " dict_state = gr.State(dict())\n", " nested_list_state = gr.State([])\n", " set_state = gr.State(set())\n", "\n", " def transform_list(x):\n", " return {n: n for n in x}, [x[:] for _ in range(len(x))], set(x)\n", "\n", " list_state.change(\n", " transform_list,\n", " inputs=list_state,\n", " outputs=[dict_state, nested_list_state, set_state],\n", " )\n", "\n", " all_textbox = gr.Textbox(label=\"Output\")\n", " click_count = gr.Number(label=\"Clicks\")\n", " change_count = gr.Number(label=\"Changes\")\n", " gr.on(\n", " inputs=[change_count, dict_state, nested_list_state, set_state],\n", " triggers=[dict_state.change, nested_list_state.change, set_state.change],\n", " fn=lambda x, *args: (x + 1, \"\\n\".join(str(arg) for arg in args)),\n", " outputs=[change_count, all_textbox],\n", " )\n", "\n", " count_to_3_btn = gr.Button(\"Count to 3\")\n", " count_to_3_btn.click(lambda: [1, 2, 3], outputs=list_state)\n", " zero_all_btn = gr.Button(\"Zero All\")\n", " zero_all_btn.click(lambda x: [0] * len(x), inputs=list_state, outputs=list_state)\n", "\n", " gr.on(\n", " [count_to_3_btn.click, zero_all_btn.click],\n", " lambda x: x + 1,\n", " click_count,\n", " click_count,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: state_change"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, 
"outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", "\n", " with gr.Row():\n", " state_a = gr.State(0)\n", " btn_a = gr.Button(\"Increment A\")\n", " value_a = gr.Number(label=\"Number A\")\n", " btn_a.click(lambda x: x + 1, state_a, state_a)\n", " state_a.change(lambda x: x, state_a, value_a)\n", " with gr.Row():\n", " state_b = gr.State(0)\n", " btn_b = gr.Button(\"Increment B\")\n", " value_b = gr.Number(label=\"Number B\")\n", " btn_b.click(lambda x: x + 1, state_b, state_b)\n", "\n", " @gr.on(inputs=state_b, outputs=value_b)\n", " def identity(x):\n", " return x\n", "\n", " @gr.render(inputs=[state_a, state_b])\n", " def render(a, b):\n", " for x in range(a):\n", " with gr.Row():\n", " for y in range(b):\n", " gr.Button(f\"Button {x}, {y}\")\n", "\n", " list_state = gr.State([])\n", " dict_state = gr.State(dict())\n", " nested_list_state = gr.State([])\n", " set_state = gr.State(set())\n", "\n", " def transform_list(x):\n", " return {n: n for n in x}, [x[:] for _ in range(len(x))], set(x)\n", "\n", " list_state.change(\n", " transform_list,\n", " inputs=list_state,\n", " outputs=[dict_state, nested_list_state, set_state],\n", " )\n", "\n", " all_textbox = gr.Textbox(label=\"Output\")\n", " click_count = gr.Number(label=\"Clicks\")\n", " change_count = gr.Number(label=\"Changes\")\n", " gr.on(\n", " inputs=[change_count, dict_state, nested_list_state, set_state],\n", " triggers=[dict_state.change, nested_list_state.change, set_state.change],\n", " fn=lambda x, *args: (x + 1, \"\\n\".join(str(arg) for arg in args)),\n", " outputs=[change_count, all_textbox],\n", " )\n", "\n", " count_to_3_btn = gr.Button(\"Count to 3\")\n", " count_to_3_btn.click(lambda: [1, 2, 3], outputs=list_state)\n", " zero_all_btn = gr.Button(\"Zero All\")\n", " zero_all_btn.click(lambda x: [0] * len(x), inputs=list_state, outputs=list_state)\n", "\n", " gr.on(\n", " [count_to_3_btn.click, zero_all_btn.click],\n", " lambda x: x + 1,\n", " 
click_count,\n", " click_count,\n", " )\n", "\n", " async def increment(x):\n", " yield x + 1\n", "\n", " n_text = gr.State(0)\n", " add_btn = gr.Button(\"Iterator State Change\")\n", " add_btn.click(increment, n_text, n_text)\n", "\n", " @gr.render(inputs=n_text)\n", " def render_count(count):\n", " for i in range(int(count)):\n", " gr.Markdown(value = f\"Success Box {i} added\", key=i)\n", " \n", " class CustomState():\n", " def __init__(self, val):\n", " self.val = val\n", "\n", " def __hash__(self) -> int:\n", " return self.val\n", "\n", " custom_state = gr.State(CustomState(5))\n", " with gr.Row():\n", " btn_10 = gr.Button(\"Set State to 10\")\n", " custom_changes = gr.Number(0, label=\"Custom State Changes\")\n", " custom_clicks = gr.Number(0, label=\"Custom State Clicks\")\n", "\n", " custom_state.change(increment, custom_changes, custom_changes)\n", " def set_to_10(cs: CustomState):\n", " cs.val = 10\n", " return cs\n", "\n", " btn_10.click(set_to_10, custom_state, custom_state).then(\n", " increment, custom_clicks, custom_clicks\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/state_change/run.py b/demo/state_change/run.py index 2846a38ce51c6..344e773a87b2d 100644 --- a/demo/state_change/run.py +++ b/demo/state_change/run.py @@ -61,5 +61,39 @@ def transform_list(x): click_count, ) + async def increment(x): + yield x + 1 + + n_text = gr.State(0) + add_btn = gr.Button("Iterator State Change") + add_btn.click(increment, n_text, n_text) + + @gr.render(inputs=n_text) + def render_count(count): + for i in range(int(count)): + gr.Markdown(value = f"Success Box {i} added", key=i) + + class CustomState(): + def __init__(self, val): + self.val = val + + def __hash__(self) -> int: + return self.val + + custom_state = gr.State(CustomState(5)) + with gr.Row(): + btn_10 = gr.Button("Set State to 10") + custom_changes = gr.Number(0, label="Custom 
State Changes") + custom_clicks = gr.Number(0, label="Custom State Clicks") + + custom_state.change(increment, custom_changes, custom_changes) + def set_to_10(cs: CustomState): + cs.val = 10 + return cs + + btn_10.click(set_to_10, custom_state, custom_state).then( + increment, custom_clicks, custom_clicks + ) + if __name__ == "__main__": demo.launch() diff --git a/demo/state_cleanup/requirements.txt b/demo/state_cleanup/requirements.txt new file mode 100644 index 0000000000000..0d59398ea6928 --- /dev/null +++ b/demo/state_cleanup/requirements.txt @@ -0,0 +1,2 @@ +numpy +Pillow diff --git a/demo/state_cleanup/run.ipynb b/demo/state_cleanup/run.ipynb index 147e03b74e882..749543afe72b8 100644 --- a/demo/state_cleanup/run.ipynb +++ b/demo/state_cleanup/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: state_cleanup"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "import gradio as gr\n", "import numpy as np\n", "from PIL import Image\n", "from pathlib import Path\n", "import secrets\n", "import shutil\n", "\n", "current_dir = Path(__file__).parent\n", "\n", "def generate_random_img(history: list[Image.Image], request: gr.Request):\n", " \"\"\"Generate a random red, green, blue, orange, yellor or purple image.\"\"\"\n", " colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 165, 0), (255, 255, 0), (128, 0, 128)]\n", " color = colors[np.random.randint(0, len(colors))]\n", " img = Image.new('RGB', (100, 100), color)\n", "\n", " user_dir: Path = current_dir / str(request.session_hash)\n", " user_dir.mkdir(exist_ok=True)\n", " path = user_dir / 
f\"{secrets.token_urlsafe(8)}.webp\"\n", "\n", " img.save(path)\n", " history.append(img)\n", "\n", " return img, history, history\n", "\n", "def delete_directory(req: gr.Request):\n", " if not req.username:\n", " return\n", " user_dir: Path = current_dir / req.username\n", " shutil.rmtree(str(user_dir))\n", "\n", "with gr.Blocks(delete_cache=(60, 3600)) as demo:\n", " gr.Markdown(\"\"\"# State Cleanup Demo\n", " \ud83d\uddbc\ufe0f Images are saved in a user-specific directory and deleted when the users closes the page via demo.unload.\n", " \"\"\")\n", " with gr.Row():\n", " with gr.Column(scale=1):\n", " with gr.Row():\n", " img = gr.Image(label=\"Generated Image\", height=300, width=300)\n", " with gr.Row():\n", " gen = gr.Button(value=\"Generate\")\n", " with gr.Row():\n", " history = gr.Gallery(label=\"Previous Generations\", height=500, columns=10)\n", " state = gr.State(value=[], delete_callback=lambda v: print(\"STATE DELETED\"))\n", "\n", " demo.load(generate_random_img, [state], [img, state, history])\n", " gen.click(generate_random_img, [state], [img, state, history])\n", " demo.unload(delete_directory)\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: state_cleanup"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "import gradio as gr\n", "import numpy as np\n", "from PIL import Image\n", "from pathlib import Path\n", "import secrets\n", "import shutil\n", "\n", "current_dir = Path(__file__).parent\n", "\n", "def generate_random_img(history: 
list[Image.Image], request: gr.Request):\n", " \"\"\"Generate a random red, green, blue, orange, yellor or purple image.\"\"\"\n", " colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 165, 0), (255, 255, 0), (128, 0, 128)]\n", " color = colors[np.random.randint(0, len(colors))]\n", " img = Image.new('RGB', (100, 100), color)\n", "\n", " user_dir: Path = current_dir / str(request.session_hash)\n", " user_dir.mkdir(exist_ok=True)\n", " path = user_dir / f\"{secrets.token_urlsafe(8)}.webp\"\n", "\n", " img.save(path)\n", " history.append(img)\n", "\n", " return img, history, history\n", "\n", "def delete_directory(req: gr.Request):\n", " if not req.username:\n", " return\n", " user_dir: Path = current_dir / req.username\n", " shutil.rmtree(str(user_dir))\n", "\n", "with gr.Blocks(delete_cache=(60, 3600)) as demo:\n", " gr.Markdown(\"\"\"# State Cleanup Demo\n", " \ud83d\uddbc\ufe0f Images are saved in a user-specific directory and deleted when the users closes the page via demo.unload.\n", " \"\"\")\n", " with gr.Row():\n", " with gr.Column(scale=1):\n", " with gr.Row():\n", " img = gr.Image(label=\"Generated Image\", height=300, width=300)\n", " with gr.Row():\n", " gen = gr.Button(value=\"Generate\")\n", " with gr.Row():\n", " history = gr.Gallery(label=\"Previous Generations\", height=500, columns=10)\n", " state = gr.State(value=[], delete_callback=lambda v: print(\"STATE DELETED\"))\n", "\n", " demo.load(generate_random_img, [state], [img, state, history])\n", " gen.click(generate_random_img, [state], [img, state, history])\n", " demo.unload(delete_directory)\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/stream_audio/requirements.txt b/demo/stream_audio/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/stream_audio/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/stream_audio/run.ipynb b/demo/stream_audio/run.ipynb index 
177973a133c6b..14e678816da1e 100644 --- a/demo/stream_audio/run.ipynb +++ b/demo/stream_audio/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "\n", "def add_to_stream(audio, instream):\n", " time.sleep(1)\n", " if audio is None:\n", " return gr.Audio(), instream\n", " if instream is None:\n", " ret = audio\n", " else:\n", " ret = (audio[0], np.concatenate((instream[1], audio[1])))\n", " return ret, ret\n", "\n", "with gr.Blocks() as demo:\n", " inp = gr.Audio(sources=[\"microphone\"])\n", " out = gr.Audio()\n", " stream = gr.State()\n", " clear = gr.Button(\"Clear\")\n", "\n", " inp.stream(add_to_stream, [inp, stream], [out, stream])\n", " clear.click(lambda: [None, None, None], None, [inp, out, stream])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "\n", "def add_to_stream(audio, instream):\n", " time.sleep(1)\n", " if audio is None:\n", " return gr.Audio(), 
instream\n", " if instream is None:\n", " ret = audio\n", " else:\n", " ret = (audio[0], np.concatenate((instream[1], audio[1])))\n", " return ret, ret\n", "\n", "with gr.Blocks() as demo:\n", " inp = gr.Audio(sources=[\"microphone\"])\n", " out = gr.Audio()\n", " stream = gr.State()\n", " clear = gr.Button(\"Clear\")\n", "\n", " inp.stream(add_to_stream, [inp, stream], [out, stream])\n", " clear.click(lambda: [None, None, None], None, [inp, out, stream])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/stream_audio_out/run.ipynb b/demo/stream_audio_out/run.ipynb index 94765656a34f7..b486a488cb7b9 100644 --- a/demo/stream_audio_out/run.ipynb +++ b/demo/stream_audio_out/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio_out"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/stream_audio_out/audio/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pydub import AudioSegment\n", "from time import sleep\n", "\n", "with gr.Blocks() as demo:\n", " input_audio = gr.Audio(label=\"Input Audio\", type=\"filepath\", format=\"mp3\")\n", " with gr.Row():\n", " with gr.Column():\n", " stream_as_file_btn = gr.Button(\"Stream as File\")\n", " format = gr.Radio([\"wav\", \"mp3\"], value=\"wav\", 
label=\"Format\")\n", " stream_as_file_output = gr.Audio(streaming=True)\n", "\n", " def stream_file(audio_file, format):\n", " audio = AudioSegment.from_file(audio_file)\n", " i = 0\n", " chunk_size = 1000\n", " while chunk_size * i < len(audio):\n", " chunk = audio[chunk_size * i : chunk_size * (i + 1)]\n", " i += 1\n", " if chunk:\n", " file = f\"/tmp/{i}.{format}\"\n", " chunk.export(file, format=format)\n", " yield file\n", " sleep(0.5)\n", "\n", " stream_as_file_btn.click(\n", " stream_file, [input_audio, format], stream_as_file_output\n", " )\n", "\n", " gr.Examples(\n", " [[\"audio/cantina.wav\", \"wav\"], [\"audio/cantina.wav\", \"mp3\"]],\n", " [input_audio, format],\n", " fn=stream_file,\n", " outputs=stream_as_file_output,\n", " )\n", "\n", " with gr.Column():\n", " stream_as_bytes_btn = gr.Button(\"Stream as Bytes\")\n", " stream_as_bytes_output = gr.Audio(streaming=True)\n", "\n", " def stream_bytes(audio_file):\n", " chunk_size = 20_000\n", " with open(audio_file, \"rb\") as f:\n", " while True:\n", " chunk = f.read(chunk_size)\n", " if chunk:\n", " yield chunk\n", " sleep(1)\n", " else:\n", " break\n", " stream_as_bytes_btn.click(stream_bytes, input_audio, stream_as_bytes_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio_out"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav 
https://github.com/gradio-app/gradio/raw/main/demo/stream_audio_out/audio/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pydub import AudioSegment\n", "from time import sleep\n", "import os\n", "import tempfile\n", "from pathlib import Path\n", "\n", "with gr.Blocks() as demo:\n", " input_audio = gr.Audio(label=\"Input Audio\", type=\"filepath\", format=\"mp3\")\n", " with gr.Row():\n", " with gr.Column():\n", " stream_as_file_btn = gr.Button(\"Stream as File\")\n", " format = gr.Radio([\"wav\", \"mp3\"], value=\"wav\", label=\"Format\")\n", " stream_as_file_output = gr.Audio(streaming=True, elem_id=\"stream_as_file_output\", autoplay=True, visible=False)\n", "\n", " def stream_file(audio_file, format):\n", " audio = AudioSegment.from_file(audio_file)\n", " i = 0\n", " chunk_size = 1000\n", " while chunk_size * i < len(audio):\n", " chunk = audio[chunk_size * i : chunk_size * (i + 1)]\n", " i += 1\n", " if chunk:\n", " file = Path(tempfile.gettempdir()) / \"stream_audio_demo\" / f\"{i}.{format}\"\n", " file.parent.mkdir(parents=True, exist_ok=True)\n", " chunk.export(str(file), format=format)\n", " yield file\n", " sleep(0.5)\n", "\n", " stream_as_file_btn.click(\n", " stream_file, [input_audio, format], stream_as_file_output\n", " )\n", "\n", " gr.Examples(\n", " [[os.path.join(os.path.abspath(''), \"audio/cantina.wav\"), \"wav\"],\n", " [os.path.join(os.path.abspath(''), \"audio/cantina.wav\"), \"mp3\"]],\n", " [input_audio, format],\n", " fn=stream_file,\n", " outputs=stream_as_file_output,\n", " cache_examples=False,\n", " )\n", "\n", " with gr.Column():\n", " stream_as_bytes_btn = gr.Button(\"Stream as Bytes\")\n", " stream_as_bytes_output = gr.Audio(streaming=True, elem_id=\"stream_as_bytes_output\", autoplay=True)\n", "\n", " def stream_bytes(audio_file):\n", " chunk_size = 20_000\n", " with open(audio_file, \"rb\") as 
f:\n", " while True:\n", " chunk = f.read(chunk_size)\n", " if chunk:\n", " yield chunk\n", " sleep(1)\n", " else:\n", " break\n", " stream_as_bytes_btn.click(stream_bytes, input_audio, stream_as_bytes_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/stream_audio_out/run.py b/demo/stream_audio_out/run.py index 9b348c532fcb6..a21dab49372fe 100644 --- a/demo/stream_audio_out/run.py +++ b/demo/stream_audio_out/run.py @@ -1,6 +1,9 @@ import gradio as gr from pydub import AudioSegment from time import sleep +import os +import tempfile +from pathlib import Path with gr.Blocks() as demo: input_audio = gr.Audio(label="Input Audio", type="filepath", format="mp3") @@ -8,7 +11,7 @@ with gr.Column(): stream_as_file_btn = gr.Button("Stream as File") format = gr.Radio(["wav", "mp3"], value="wav", label="Format") - stream_as_file_output = gr.Audio(streaming=True) + stream_as_file_output = gr.Audio(streaming=True, elem_id="stream_as_file_output", autoplay=True, visible=False) def stream_file(audio_file, format): audio = AudioSegment.from_file(audio_file) @@ -18,8 +21,9 @@ def stream_file(audio_file, format): chunk = audio[chunk_size * i : chunk_size * (i + 1)] i += 1 if chunk: - file = f"/tmp/{i}.{format}" - chunk.export(file, format=format) + file = Path(tempfile.gettempdir()) / "stream_audio_demo" / f"{i}.{format}" + file.parent.mkdir(parents=True, exist_ok=True) + chunk.export(str(file), format=format) yield file sleep(0.5) @@ -28,15 +32,17 @@ def stream_file(audio_file, format): ) gr.Examples( - [["audio/cantina.wav", "wav"], ["audio/cantina.wav", "mp3"]], + [[os.path.join(os.path.dirname(__file__), "audio/cantina.wav"), "wav"], + [os.path.join(os.path.dirname(__file__), "audio/cantina.wav"), "mp3"]], [input_audio, format], fn=stream_file, outputs=stream_as_file_output, + cache_examples=False, ) with gr.Column(): stream_as_bytes_btn = gr.Button("Stream as 
Bytes") - stream_as_bytes_output = gr.Audio(streaming=True) + stream_as_bytes_output = gr.Audio(streaming=True, elem_id="stream_as_bytes_output", autoplay=True) def stream_bytes(audio_file): chunk_size = 20_000 diff --git a/demo/stream_frames/requirements.txt b/demo/stream_frames/requirements.txt new file mode 100644 index 0000000000000..24ce15ab7ead3 --- /dev/null +++ b/demo/stream_frames/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/demo/stream_frames/run.ipynb b/demo/stream_frames/run.ipynb index 1fab7111bf62b..15b2efc534de4 100644 --- a/demo/stream_frames/run.ipynb +++ b/demo/stream_frames/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_frames"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "def flip(im):\n", " return np.flipud(im)\n", "\n", "demo = gr.Interface(\n", " flip,\n", " gr.Image(sources=[\"webcam\"], streaming=True),\n", " \"image\",\n", " live=True\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_frames"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "def flip(im):\n", " return 
np.flipud(im)\n", "\n", "demo = gr.Interface(\n", " flip,\n", " gr.Image(sources=[\"webcam\"], streaming=True),\n", " \"image\",\n", " live=True\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/stream_video_out/run.ipynb b/demo/stream_video_out/run.ipynb new file mode 100644 index 0000000000000..10c88f79cef29 --- /dev/null +++ b/demo/stream_video_out/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_video_out"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('video')\n", "!wget -q -O video/compliment_bot_screen_recording_3x.mp4 https://github.com/gradio-app/gradio/raw/main/demo/stream_video_out/video/compliment_bot_screen_recording_3x.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import cv2\n", "import os\n", "from pathlib import Path\n", "import atexit\n", "\n", "current_dir = Path(__file__).resolve().parent\n", "\n", "\n", "def delete_files():\n", " for p in Path(current_dir).glob(\"*.ts\"):\n", " p.unlink()\n", " for p in Path(current_dir).glob(\"*.mp4\"):\n", " p.unlink()\n", "\n", "\n", "atexit.register(delete_files)\n", "\n", "\n", "def process_video(input_video, stream_as_mp4):\n", " cap = cv2.VideoCapture(input_video)\n", "\n", " video_codec = cv2.VideoWriter_fourcc(*\"mp4v\") if stream_as_mp4 else cv2.VideoWriter_fourcc(*\"x264\") # type: ignore\n", " fps = 
int(cap.get(cv2.CAP_PROP_FPS))\n", " width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n", " height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n", "\n", " iterating, frame = cap.read()\n", "\n", " n_frames = 0\n", " n_chunks = 0\n", " name = str(current_dir / f\"output_{n_chunks}{'.mp4' if stream_as_mp4 else '.ts'}\")\n", " segment_file = cv2.VideoWriter(name, video_codec, fps, (width, height)) # type: ignore\n", "\n", " while iterating:\n", "\n", " # flip frame vertically\n", " frame = cv2.flip(frame, 0)\n", " display_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", " segment_file.write(display_frame)\n", " n_frames += 1\n", " if n_frames == 3 * fps:\n", " n_chunks += 1\n", " segment_file.release()\n", " n_frames = 0\n", " yield name\n", " name = str(\n", " current_dir / f\"output_{n_chunks}{'.mp4' if stream_as_mp4 else '.ts'}\"\n", " )\n", " segment_file = cv2.VideoWriter(name, video_codec, fps, (width, height)) # type: ignore\n", "\n", " iterating, frame = cap.read()\n", "\n", " segment_file.release()\n", " yield name\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"# Video Streaming Out \ud83d\udcf9\")\n", " with gr.Row():\n", " with gr.Column():\n", " input_video = gr.Video(label=\"input\")\n", " checkbox = gr.Checkbox(label=\"Stream as MP4 file?\", value=False)\n", " with gr.Column():\n", " processed_frames = gr.Video(\n", " label=\"stream\",\n", " streaming=True,\n", " autoplay=True,\n", " elem_id=\"stream_video_output\",\n", " )\n", " with gr.Row():\n", " process_video_btn = gr.Button(\"process video\")\n", "\n", " process_video_btn.click(process_video, [input_video, checkbox], [processed_frames])\n", "\n", " gr.Examples(\n", " [\n", " [\n", " os.path.join(\n", " os.path.abspath(''),\n", " \"video/compliment_bot_screen_recording_3x.mp4\",\n", " ),\n", " False,\n", " ],\n", " [\n", " os.path.join(\n", " os.path.abspath(''),\n", " \"video/compliment_bot_screen_recording_3x.mp4\",\n", " ),\n", " True,\n", " ],\n", " ],\n", " [input_video, 
checkbox],\n", " fn=process_video,\n", " outputs=processed_frames,\n", " cache_examples=False,\n", " )\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/stream_video_out/run.py b/demo/stream_video_out/run.py new file mode 100644 index 0000000000000..0b573749ddc8b --- /dev/null +++ b/demo/stream_video_out/run.py @@ -0,0 +1,101 @@ +import gradio as gr +import cv2 +import os +from pathlib import Path +import atexit + +current_dir = Path(__file__).resolve().parent + + +def delete_files(): + for p in Path(current_dir).glob("*.ts"): + p.unlink() + for p in Path(current_dir).glob("*.mp4"): + p.unlink() + + +atexit.register(delete_files) + + +def process_video(input_video, stream_as_mp4): + cap = cv2.VideoCapture(input_video) + + video_codec = cv2.VideoWriter_fourcc(*"mp4v") if stream_as_mp4 else cv2.VideoWriter_fourcc(*"x264") # type: ignore + fps = int(cap.get(cv2.CAP_PROP_FPS)) + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + iterating, frame = cap.read() + + n_frames = 0 + n_chunks = 0 + name = str(current_dir / f"output_{n_chunks}{'.mp4' if stream_as_mp4 else '.ts'}") + segment_file = cv2.VideoWriter(name, video_codec, fps, (width, height)) # type: ignore + + while iterating: + + # flip frame vertically + frame = cv2.flip(frame, 0) + display_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + segment_file.write(display_frame) + n_frames += 1 + if n_frames == 3 * fps: + n_chunks += 1 + segment_file.release() + n_frames = 0 + yield name + name = str( + current_dir / f"output_{n_chunks}{'.mp4' if stream_as_mp4 else '.ts'}" + ) + segment_file = cv2.VideoWriter(name, video_codec, fps, (width, height)) # type: ignore + + iterating, frame = cap.read() + + segment_file.release() + yield name + + +with gr.Blocks() as demo: + gr.Markdown("# Video Streaming Out 📹") + with gr.Row(): + with gr.Column(): + 
input_video = gr.Video(label="input") + checkbox = gr.Checkbox(label="Stream as MP4 file?", value=False) + with gr.Column(): + processed_frames = gr.Video( + label="stream", + streaming=True, + autoplay=True, + elem_id="stream_video_output", + ) + with gr.Row(): + process_video_btn = gr.Button("process video") + + process_video_btn.click(process_video, [input_video, checkbox], [processed_frames]) + + gr.Examples( + [ + [ + os.path.join( + os.path.dirname(__file__), + "video/compliment_bot_screen_recording_3x.mp4", + ), + False, + ], + [ + os.path.join( + os.path.dirname(__file__), + "video/compliment_bot_screen_recording_3x.mp4", + ), + True, + ], + ], + [input_video, checkbox], + fn=process_video, + outputs=processed_frames, + cache_examples=False, + ) + + +if __name__ == "__main__": + demo.launch() diff --git a/demo/stream_video_out/video/compliment_bot_screen_recording_3x.mp4 b/demo/stream_video_out/video/compliment_bot_screen_recording_3x.mp4 new file mode 100644 index 0000000000000..7a7395bf43b40 Binary files /dev/null and b/demo/stream_video_out/video/compliment_bot_screen_recording_3x.mp4 differ diff --git a/demo/streaming_filter/requirements.txt b/demo/streaming_filter/requirements.txt new file mode 100644 index 0000000000000..b96544bd0b2a3 --- /dev/null +++ b/demo/streaming_filter/requirements.txt @@ -0,0 +1,2 @@ +opencv-python +numpy diff --git a/demo/streaming_filter/run.ipynb b/demo/streaming_filter/run.ipynb new file mode 100644 index 0000000000000..161dde8a85988 --- /dev/null +++ b/demo/streaming_filter/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_filter"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy "]}, {"cell_type": "code", "execution_count": null, "id": 
"288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import cv2\n", "\n", "def transform_cv2(frame, transform):\n", " if transform == \"cartoon\":\n", " # prepare color\n", " img_color = cv2.pyrDown(cv2.pyrDown(frame))\n", " for _ in range(6):\n", " img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n", " img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n", "\n", " # prepare edges\n", " img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n", " img_edges = cv2.adaptiveThreshold(\n", " cv2.medianBlur(img_edges, 7),\n", " 255,\n", " cv2.ADAPTIVE_THRESH_MEAN_C,\n", " cv2.THRESH_BINARY,\n", " 9,\n", " 2,\n", " )\n", " img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n", " # combine color and edges\n", " img = cv2.bitwise_and(img_color, img_edges)\n", " return img\n", " elif transform == \"edges\":\n", " # perform edge detection\n", " img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)\n", " return img\n", " else:\n", " return np.flipud(frame)\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " transform = gr.Dropdown(choices=[\"cartoon\", \"edges\", \"flip\"],\n", " value=\"flip\", label=\"Transformation\")\n", " input_img = gr.Image(sources=[\"webcam\"], type=\"numpy\")\n", " with gr.Column():\n", " output_img = gr.Image(streaming=True)\n", " dep = input_img.stream(transform_cv2, [input_img, transform], [output_img],\n", " time_limit=30, stream_every=0.1, concurrency_limit=30)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/streaming_filter/run.py b/demo/streaming_filter/run.py new file mode 100644 index 0000000000000..dca23e5602ac3 --- /dev/null +++ b/demo/streaming_filter/run.py @@ -0,0 +1,46 @@ +import gradio as gr +import numpy as np +import cv2 + +def transform_cv2(frame, transform): + if transform == "cartoon": + # 
prepare color + img_color = cv2.pyrDown(cv2.pyrDown(frame)) + for _ in range(6): + img_color = cv2.bilateralFilter(img_color, 9, 9, 7) + img_color = cv2.pyrUp(cv2.pyrUp(img_color)) + + # prepare edges + img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + img_edges = cv2.adaptiveThreshold( + cv2.medianBlur(img_edges, 7), + 255, + cv2.ADAPTIVE_THRESH_MEAN_C, + cv2.THRESH_BINARY, + 9, + 2, + ) + img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB) + # combine color and edges + img = cv2.bitwise_and(img_color, img_edges) + return img + elif transform == "edges": + # perform edge detection + img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR) + return img + else: + return np.flipud(frame) + +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + transform = gr.Dropdown(choices=["cartoon", "edges", "flip"], + value="flip", label="Transformation") + input_img = gr.Image(sources=["webcam"], type="numpy") + with gr.Column(): + output_img = gr.Image(streaming=True) + dep = input_img.stream(transform_cv2, [input_img, transform], [output_img], + time_limit=30, stream_every=0.1, concurrency_limit=30) + +if __name__ == "__main__": + demo.launch() diff --git a/demo/streaming_filter_unified/requirements.txt b/demo/streaming_filter_unified/requirements.txt new file mode 100644 index 0000000000000..b96544bd0b2a3 --- /dev/null +++ b/demo/streaming_filter_unified/requirements.txt @@ -0,0 +1,2 @@ +opencv-python +numpy diff --git a/demo/streaming_filter_unified/run.ipynb b/demo/streaming_filter_unified/run.ipynb new file mode 100644 index 0000000000000..bbb5dfcc4983a --- /dev/null +++ b/demo/streaming_filter_unified/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_filter_unified"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio 
opencv-python numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import cv2\n", "\n", "def transform_cv2(frame, transform):\n", " if transform == \"cartoon\":\n", " # prepare color\n", " img_color = cv2.pyrDown(cv2.pyrDown(frame))\n", " for _ in range(6):\n", " img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n", " img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n", "\n", " # prepare edges\n", " img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n", " img_edges = cv2.adaptiveThreshold(\n", " cv2.medianBlur(img_edges, 7),\n", " 255,\n", " cv2.ADAPTIVE_THRESH_MEAN_C,\n", " cv2.THRESH_BINARY,\n", " 9,\n", " 2,\n", " )\n", " img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n", " # combine color and edges\n", " img = cv2.bitwise_and(img_color, img_edges)\n", " return img\n", " elif transform == \"edges\":\n", " # perform edge detection\n", " img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)\n", " return img\n", " else:\n", " return np.flipud(frame)\n", "\n", "\n", "css=\"\"\".my-group {max-width: 500px !important; max-height: 500px !important;}\n", " .my-column {display: flex !important; justify-content: center !important; align-items: center !important};\"\"\"\n", "\n", "with gr.Blocks(css=css) as demo:\n", " with gr.Column(elem_classes=[\"my-column\"]):\n", " with gr.Group(elem_classes=[\"my-group\"]):\n", " transform = gr.Dropdown(choices=[\"cartoon\", \"edges\", \"flip\"],\n", " value=\"flip\", label=\"Transformation\")\n", " input_img = gr.Image(sources=[\"webcam\"], type=\"numpy\", streaming=True)\n", " input_img.stream(transform_cv2, [input_img, transform], [input_img], time_limit=30, stream_every=0.1)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git 
a/demo/streaming_filter_unified/run.py b/demo/streaming_filter_unified/run.py new file mode 100644 index 0000000000000..fd8ce7956484d --- /dev/null +++ b/demo/streaming_filter_unified/run.py @@ -0,0 +1,48 @@ +import gradio as gr +import numpy as np +import cv2 + +def transform_cv2(frame, transform): + if transform == "cartoon": + # prepare color + img_color = cv2.pyrDown(cv2.pyrDown(frame)) + for _ in range(6): + img_color = cv2.bilateralFilter(img_color, 9, 9, 7) + img_color = cv2.pyrUp(cv2.pyrUp(img_color)) + + # prepare edges + img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + img_edges = cv2.adaptiveThreshold( + cv2.medianBlur(img_edges, 7), + 255, + cv2.ADAPTIVE_THRESH_MEAN_C, + cv2.THRESH_BINARY, + 9, + 2, + ) + img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB) + # combine color and edges + img = cv2.bitwise_and(img_color, img_edges) + return img + elif transform == "edges": + # perform edge detection + img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR) + return img + else: + return np.flipud(frame) + + +css=""".my-group {max-width: 500px !important; max-height: 500px !important;} + .my-column {display: flex !important; justify-content: center !important; align-items: center !important};""" + +with gr.Blocks(css=css) as demo: + with gr.Column(elem_classes=["my-column"]): + with gr.Group(elem_classes=["my-group"]): + transform = gr.Dropdown(choices=["cartoon", "edges", "flip"], + value="flip", label="Transformation") + input_img = gr.Image(sources=["webcam"], type="numpy", streaming=True) + input_img.stream(transform_cv2, [input_img, transform], [input_img], time_limit=30, stream_every=0.1) + + +if __name__ == "__main__": + demo.launch() diff --git a/demo/streaming_simple/run.ipynb b/demo/streaming_simple/run.ipynb new file mode 100644 index 0000000000000..148301ad2c1bc --- /dev/null +++ b/demo/streaming_simple/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": 
{}, "source": ["# Gradio Demo: streaming_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " input_img = gr.Image(label=\"Input\", sources=\"webcam\")\n", " with gr.Column():\n", " output_img = gr.Image(label=\"Output\")\n", " input_img.stream(lambda s: s, input_img, output_img, time_limit=15, stream_every=0.1, concurrency_limit=30)\n", "\n", "if __name__ == \"__main__\":\n", "\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/streaming_simple/run.py b/demo/streaming_simple/run.py new file mode 100644 index 0000000000000..a08f066c61ef1 --- /dev/null +++ b/demo/streaming_simple/run.py @@ -0,0 +1,13 @@ +import gradio as gr + +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + input_img = gr.Image(label="Input", sources="webcam") + with gr.Column(): + output_img = gr.Image(label="Output") + input_img.stream(lambda s: s, input_img, output_img, time_limit=15, stream_every=0.1, concurrency_limit=30) + +if __name__ == "__main__": + + demo.launch() diff --git a/demo/sub_block_render/requirements.txt b/demo/sub_block_render/requirements.txt new file mode 100644 index 0000000000000..7e2fba5e6c5dd --- /dev/null +++ b/demo/sub_block_render/requirements.txt @@ -0,0 +1 @@ +Pillow diff --git a/demo/sub_block_render/run.ipynb b/demo/sub_block_render/run.ipynb index 882843c57a9d2..fdc218fb8b907 100644 --- a/demo/sub_block_render/run.ipynb +++ b/demo/sub_block_render/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: 
sub_block_render"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sub_block_render/cheetah.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sub_block_render/frog.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "from pathlib import Path\n", "\n", "from PIL import Image\n", "\n", "root = Path(os.path.abspath(''))\n", "\n", "def infer(\n", " text,\n", " guidance_scale,\n", "):\n", "\n", " img = (\n", " Image.open(root / \"cheetah.jpg\")\n", " if text == \"Cheetah\"\n", " else Image.open(root / \"frog.jpg\")\n", " )\n", " img = img.resize((224, 224))\n", "\n", " return ([img, img, img, img], \"image\")\n", "\n", "block = gr.Blocks()\n", "\n", "examples = [\n", " [\"A serious capybara at work, wearing a suit\", 7],\n", " [\"A Squirtle fine dining with a view to the London Eye\", 7],\n", " [\"A tamale food cart in front of a Japanese Castle\", 7],\n", " [\"a graffiti of a robot serving meals to people\", 7],\n", " [\"a beautiful cabin in Attersee, Austria, 3d animation style\", 7],\n", "]\n", "\n", "with block as demo:\n", " with gr.Row(elem_id=\"prompt-container\", equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " elem_id=\"prompt-text-input\",\n", " )\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\", rows=2, columns=2\n", " )\n", " out_txt = gr.Textbox(\n", " 
label=\"Prompt\",\n", " placeholder=\"Enter a prompt to generate an image\",\n", " lines=3,\n", " elem_id=\"prompt-text-input\",\n", " )\n", "\n", " guidance_scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", "\n", " ex = gr.Examples(\n", " examples=examples,\n", " fn=infer,\n", " inputs=[text, guidance_scale],\n", " outputs=[gallery, out_txt],\n", " cache_examples=True,\n", " )\n", "\n", " text.submit(\n", " infer,\n", " inputs=[text, guidance_scale],\n", " outputs=[gallery, out_txt],\n", " concurrency_id=\"infer\",\n", " concurrency_limit=8,\n", " )\n", "\n", "with gr.Blocks() as demo:\n", " block.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(max_size=10, api_open=False).launch(show_api=False)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sub_block_render"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sub_block_render/cheetah.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sub_block_render/frog.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "from pathlib import Path\n", "\n", "from PIL import Image\n", "\n", "root = Path(os.path.abspath(''))\n", "\n", "def infer(\n", " text,\n", " guidance_scale,\n", "):\n", "\n", " img = (\n", " Image.open(root / \"cheetah.jpg\")\n", " if text == 
\"Cheetah\"\n", " else Image.open(root / \"frog.jpg\")\n", " )\n", " img = img.resize((224, 224))\n", "\n", " return ([img, img, img, img], \"image\")\n", "\n", "block = gr.Blocks()\n", "\n", "examples = [\n", " [\"A serious capybara at work, wearing a suit\", 7],\n", " [\"A Squirtle fine dining with a view to the London Eye\", 7],\n", " [\"A tamale food cart in front of a Japanese Castle\", 7],\n", " [\"a graffiti of a robot serving meals to people\", 7],\n", " [\"a beautiful cabin in Attersee, Austria, 3d animation style\", 7],\n", "]\n", "\n", "with block as demo:\n", " with gr.Row(elem_id=\"prompt-container\", equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " elem_id=\"prompt-text-input\",\n", " )\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\", rows=2, columns=2\n", " )\n", " out_txt = gr.Textbox(\n", " label=\"Prompt\",\n", " placeholder=\"Enter a prompt to generate an image\",\n", " lines=3,\n", " elem_id=\"prompt-text-input\",\n", " )\n", "\n", " guidance_scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", "\n", " ex = gr.Examples(\n", " examples=examples,\n", " fn=infer,\n", " inputs=[text, guidance_scale],\n", " outputs=[gallery, out_txt],\n", " cache_examples=True,\n", " )\n", "\n", " text.submit(\n", " infer,\n", " inputs=[text, guidance_scale],\n", " outputs=[gallery, out_txt],\n", " concurrency_id=\"infer\",\n", " concurrency_limit=8,\n", " )\n", "\n", "with gr.Blocks() as demo:\n", " block.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(max_size=10, api_open=False).launch(show_api=False)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/theme_new_step_3/run.ipynb b/demo/theme_new_step_3/run.ipynb index c4bc7f3b11e45..de3cfa8b8e760 100644 --- 
a/demo/theme_new_step_3/run.ipynb +++ b/demo/theme_new_step_3/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: theme_new_step_3"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "from typing import Iterable\n", "import gradio as gr\n", "from gradio.themes.base import Base\n", "from gradio.themes.utils import colors, fonts, sizes\n", "import time\n", "\n", "class Seafoam(Base):\n", " def __init__(\n", " self,\n", " *,\n", " primary_hue: colors.Color | str = colors.emerald,\n", " secondary_hue: colors.Color | str = colors.blue,\n", " neutral_hue: colors.Color | str = colors.blue,\n", " spacing_size: sizes.Size | str = sizes.spacing_md,\n", " radius_size: sizes.Size | str = sizes.radius_md,\n", " text_size: sizes.Size | str = sizes.text_lg,\n", " font: fonts.Font\n", " | str\n", " | Iterable[fonts.Font | str] = (\n", " fonts.GoogleFont(\"Quicksand\"),\n", " \"ui-sans-serif\",\n", " \"sans-serif\",\n", " ),\n", " font_mono: fonts.Font\n", " | str\n", " | Iterable[fonts.Font | str] = (\n", " fonts.GoogleFont(\"IBM Plex Mono\"),\n", " \"ui-monospace\",\n", " \"monospace\",\n", " ),\n", " ):\n", " super().__init__(\n", " primary_hue=primary_hue,\n", " secondary_hue=secondary_hue,\n", " neutral_hue=neutral_hue,\n", " spacing_size=spacing_size,\n", " radius_size=radius_size,\n", " text_size=text_size,\n", " font=font,\n", " font_mono=font_mono,\n", " )\n", " super().set(\n", " body_background_fill=\"repeating-linear-gradient(45deg, *primary_200, *primary_200 10px, *primary_50 10px, *primary_50 20px)\",\n", " body_background_fill_dark=\"repeating-linear-gradient(45deg, 
*primary_800, *primary_800 10px, *primary_900 10px, *primary_900 20px)\",\n", " button_primary_background_fill=\"linear-gradient(90deg, *primary_300, *secondary_400)\",\n", " button_primary_background_fill_hover=\"linear-gradient(90deg, *primary_200, *secondary_300)\",\n", " button_primary_text_color=\"white\",\n", " button_primary_background_fill_dark=\"linear-gradient(90deg, *primary_600, *secondary_800)\",\n", " slider_color=\"*secondary_300\",\n", " slider_color_dark=\"*secondary_600\",\n", " block_title_text_weight=\"600\",\n", " block_border_width=\"3px\",\n", " block_shadow=\"*shadow_drop_lg\",\n", " button_shadow=\"*shadow_drop_lg\",\n", " button_large_padding=\"32px\",\n", " )\n", "\n", "seafoam = Seafoam()\n", "\n", "with gr.Blocks(theme=seafoam) as demo:\n", " textbox = gr.Textbox(label=\"Name\")\n", " slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n", " with gr.Row():\n", " button = gr.Button(\"Submit\", variant=\"primary\")\n", " clear = gr.Button(\"Clear\")\n", " output = gr.Textbox(label=\"Output\")\n", "\n", " def repeat(name, count):\n", " time.sleep(3)\n", " return name * count\n", "\n", " button.click(repeat, [textbox, slider], output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: theme_new_step_3"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "from typing import Iterable\n", "import gradio as gr\n", "from gradio.themes.base import Base\n", "from gradio.themes.utils import colors, fonts, sizes\n", 
"import time\n", "\n", "class Seafoam(Base):\n", " def __init__(\n", " self,\n", " *,\n", " primary_hue: colors.Color | str = colors.emerald,\n", " secondary_hue: colors.Color | str = colors.blue,\n", " neutral_hue: colors.Color | str = colors.blue,\n", " spacing_size: sizes.Size | str = sizes.spacing_md,\n", " radius_size: sizes.Size | str = sizes.radius_md,\n", " text_size: sizes.Size | str = sizes.text_lg,\n", " font: fonts.Font\n", " | str\n", " | Iterable[fonts.Font | str] = (\n", " fonts.GoogleFont(\"Quicksand\"),\n", " \"ui-sans-serif\",\n", " \"sans-serif\",\n", " ),\n", " font_mono: fonts.Font\n", " | str\n", " | Iterable[fonts.Font | str] = (\n", " fonts.GoogleFont(\"IBM Plex Mono\"),\n", " \"ui-monospace\",\n", " \"monospace\",\n", " ),\n", " ):\n", " super().__init__(\n", " primary_hue=primary_hue,\n", " secondary_hue=secondary_hue,\n", " neutral_hue=neutral_hue,\n", " spacing_size=spacing_size,\n", " radius_size=radius_size,\n", " text_size=text_size,\n", " font=font,\n", " font_mono=font_mono,\n", " )\n", " super().set(\n", " body_background_fill=\"repeating-linear-gradient(45deg, *primary_200, *primary_200 10px, *primary_50 10px, *primary_50 20px)\",\n", " body_background_fill_dark=\"repeating-linear-gradient(45deg, *primary_800, *primary_800 10px, *primary_900 10px, *primary_900 20px)\",\n", " button_primary_background_fill=\"linear-gradient(90deg, *primary_300, *secondary_400)\",\n", " button_primary_background_fill_hover=\"linear-gradient(90deg, *primary_200, *secondary_300)\",\n", " button_primary_text_color=\"white\",\n", " button_primary_background_fill_dark=\"linear-gradient(90deg, *primary_600, *secondary_800)\",\n", " slider_color=\"*secondary_300\",\n", " slider_color_dark=\"*secondary_600\",\n", " block_title_text_weight=\"600\",\n", " block_border_width=\"3px\",\n", " block_shadow=\"*shadow_drop_lg\",\n", " button_primary_shadow=\"*shadow_drop_lg\",\n", " button_large_padding=\"32px\",\n", " )\n", "\n", "seafoam = Seafoam()\n", "\n", 
"with gr.Blocks(theme=seafoam) as demo:\n", " textbox = gr.Textbox(label=\"Name\")\n", " slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n", " with gr.Row():\n", " button = gr.Button(\"Submit\", variant=\"primary\")\n", " clear = gr.Button(\"Clear\")\n", " output = gr.Textbox(label=\"Output\")\n", "\n", " def repeat(name, count):\n", " time.sleep(3)\n", " return name * count\n", "\n", " button.click(repeat, [textbox, slider], output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/theme_new_step_3/run.py b/demo/theme_new_step_3/run.py index f76013feeb5f5..34a2917a7394e 100644 --- a/demo/theme_new_step_3/run.py +++ b/demo/theme_new_step_3/run.py @@ -52,7 +52,7 @@ def __init__( block_title_text_weight="600", block_border_width="3px", block_shadow="*shadow_drop_lg", - button_shadow="*shadow_drop_lg", + button_primary_shadow="*shadow_drop_lg", button_large_padding="32px", ) diff --git a/demo/unispeech-speaker-verification/run.ipynb b/demo/unispeech-speaker-verification/run.ipynb index bcd1ce85cedc1..927b1ed0b2de5 100644 --- a/demo/unispeech-speaker-verification/run.ipynb +++ b/demo/unispeech-speaker-verification/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: unispeech-speaker-verification"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers torchaudio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('samples')\n", "!wget -q -O samples/cate_blanch.mp3 
https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/cate_blanch.mp3\n", "!wget -q -O samples/cate_blanch_2.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/cate_blanch_2.mp3\n", "!wget -q -O samples/cate_blanch_3.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/cate_blanch_3.mp3\n", "!wget -q -O samples/heath_ledger.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/heath_ledger.mp3\n", "!wget -q -O samples/heath_ledger_2.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/heath_ledger_2.mp3\n", "!wget -q -O samples/kirsten_dunst.wav https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/kirsten_dunst.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from torchaudio.sox_effects import apply_effects_file\n", "from transformers import AutoFeatureExtractor, AutoModelForAudioXVector\n", "\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "STYLE = \"\"\"\n", "\n", "\"\"\"\n", "OUTPUT_OK = (\n", " STYLE\n", " + \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

Welcome, human!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "OUTPUT_FAIL = (\n", " STYLE\n", " + \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

You shall not pass!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "\n", "EFFECTS = [\n", " [\"remix\", \"-\"],\n", " [\"channels\", \"1\"],\n", " [\"rate\", \"16000\"],\n", " [\"gain\", \"-1.0\"],\n", " [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n", " [\"trim\", \"0\", \"10\"],\n", "]\n", "\n", "THRESHOLD = 0.85\n", "\n", "model_name = \"microsoft/unispeech-sat-base-plus-sv\"\n", "feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\n", "model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)\n", "cosine_sim = torch.nn.CosineSimilarity(dim=-1)\n", "\n", "def similarity_fn(path1, path2):\n", " if not (path1 and path2):\n", " return 'ERROR: Please record audio for *both* speakers!'\n", " wav1, _ = apply_effects_file(path1, EFFECTS)\n", " wav2, _ = apply_effects_file(path2, EFFECTS)\n", " print(wav1.shape, wav2.shape)\n", "\n", " input1 = feature_extractor(wav1.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", " input2 = feature_extractor(wav2.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", "\n", " with torch.no_grad():\n", " emb1 = model(input1).embeddings\n", " emb2 = model(input2).embeddings\n", " emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()\n", " emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()\n", " similarity = cosine_sim(emb1, emb2).numpy()[0]\n", "\n", " if similarity >= THRESHOLD:\n", " output = OUTPUT_OK.format(similarity * 100)\n", " else:\n", " output = OUTPUT_FAIL.format(similarity * 100)\n", "\n", " return output\n", "\n", "inputs = [\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #1\"),\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #2\"),\n", "]\n", "output = gr.HTML(label=\"\")\n", "\n", "description = (\n", " \"This demo will compare two speech samples and determine if they are from the same speaker. \"\n", " \"Try it with your own voice!\"\n", ")\n", "article = (\n", " \"

\"\n", " \"\ud83c\udf99\ufe0f Learn more about UniSpeech-SAT | \"\n", " \"\ud83d\udcda UniSpeech-SAT paper | \"\n", " \"\ud83d\udcda X-Vector paper\"\n", " \"

\"\n", ")\n", "examples = [\n", " [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_2.mp3\"],\n", " [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_3.mp3\"],\n", " [\"samples/cate_blanch_2.mp3\", \"samples/cate_blanch_3.mp3\"],\n", " [\"samples/heath_ledger.mp3\", \"samples/heath_ledger_2.mp3\"],\n", " [\"samples/cate_blanch.mp3\", \"samples/kirsten_dunst.wav\"],\n", "]\n", "\n", "demo = gr.Interface(\n", " fn=similarity_fn,\n", " inputs=inputs,\n", " outputs=output,\n", " title=\"Voice Authentication with UniSpeech-SAT + X-Vectors\",\n", " description=description,\n", " article=article,\n", " allow_flagging=\"never\",\n", " live=False,\n", " examples=examples,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: unispeech-speaker-verification"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers torchaudio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('samples')\n", "!wget -q -O samples/cate_blanch.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/cate_blanch.mp3\n", "!wget -q -O samples/cate_blanch_2.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/cate_blanch_2.mp3\n", "!wget -q -O samples/cate_blanch_3.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/cate_blanch_3.mp3\n", "!wget -q -O samples/heath_ledger.mp3 
https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/heath_ledger.mp3\n", "!wget -q -O samples/heath_ledger_2.mp3 https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/heath_ledger_2.mp3\n", "!wget -q -O samples/kirsten_dunst.wav https://github.com/gradio-app/gradio/raw/main/demo/unispeech-speaker-verification/samples/kirsten_dunst.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from torchaudio.sox_effects import apply_effects_file\n", "from transformers import AutoFeatureExtractor, AutoModelForAudioXVector\n", "\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "STYLE = \"\"\"\n", "\n", "\"\"\"\n", "OUTPUT_OK = (\n", " STYLE\n", " + \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

Welcome, human!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "OUTPUT_FAIL = (\n", " STYLE\n", " + \"\"\"\n", "
\n", "

The speakers are

\n", "

{:.1f}%

\n", "

similar

\n", "

You shall not pass!

\n", "
(You must get at least 85% to be considered the same person)
\n", "
\n", "\"\"\"\n", ")\n", "\n", "EFFECTS = [\n", " [\"remix\", \"-\"],\n", " [\"channels\", \"1\"],\n", " [\"rate\", \"16000\"],\n", " [\"gain\", \"-1.0\"],\n", " [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n", " [\"trim\", \"0\", \"10\"],\n", "]\n", "\n", "THRESHOLD = 0.85\n", "\n", "model_name = \"microsoft/unispeech-sat-base-plus-sv\"\n", "feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\n", "model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)\n", "cosine_sim = torch.nn.CosineSimilarity(dim=-1)\n", "\n", "def similarity_fn(path1, path2):\n", " if not (path1 and path2):\n", " return 'ERROR: Please record audio for *both* speakers!'\n", " wav1, _ = apply_effects_file(path1, EFFECTS)\n", " wav2, _ = apply_effects_file(path2, EFFECTS)\n", " print(wav1.shape, wav2.shape)\n", "\n", " input1 = feature_extractor(wav1.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", " input2 = feature_extractor(wav2.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n", "\n", " with torch.no_grad():\n", " emb1 = model(input1).embeddings\n", " emb2 = model(input2).embeddings\n", " emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()\n", " emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()\n", " similarity = cosine_sim(emb1, emb2).numpy()[0]\n", "\n", " if similarity >= THRESHOLD:\n", " output = OUTPUT_OK.format(similarity * 100)\n", " else:\n", " output = OUTPUT_FAIL.format(similarity * 100)\n", "\n", " return output\n", "\n", "inputs = [\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #1\"),\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", label=\"Speaker #2\"),\n", "]\n", "output = gr.HTML(label=\"\")\n", "\n", "description = (\n", " \"This demo will compare two speech samples and determine if they are from the same speaker. \"\n", " \"Try it with your own voice!\"\n", ")\n", "article = (\n", " \"

\"\n", " \"\ud83c\udf99\ufe0f Learn more about UniSpeech-SAT | \"\n", " \"\ud83d\udcda UniSpeech-SAT paper | \"\n", " \"\ud83d\udcda X-Vector paper\"\n", " \"

\"\n", ")\n", "examples = [\n", " [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_2.mp3\"],\n", " [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_3.mp3\"],\n", " [\"samples/cate_blanch_2.mp3\", \"samples/cate_blanch_3.mp3\"],\n", " [\"samples/heath_ledger.mp3\", \"samples/heath_ledger_2.mp3\"],\n", " [\"samples/cate_blanch.mp3\", \"samples/kirsten_dunst.wav\"],\n", "]\n", "\n", "demo = gr.Interface(\n", " fn=similarity_fn,\n", " inputs=inputs,\n", " outputs=output,\n", " title=\"Voice Authentication with UniSpeech-SAT + X-Vectors\",\n", " description=description,\n", " article=article,\n", " flagging_mode=\"never\",\n", " live=False,\n", " examples=examples,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/unispeech-speaker-verification/run.py b/demo/unispeech-speaker-verification/run.py index 9adafcba4e868..408f518bc0e63 100644 --- a/demo/unispeech-speaker-verification/run.py +++ b/demo/unispeech-speaker-verification/run.py @@ -105,7 +105,7 @@ def similarity_fn(path1, path2): title="Voice Authentication with UniSpeech-SAT + X-Vectors", description=description, article=article, - allow_flagging="never", + flagging_mode="never", live=False, examples=examples, ) diff --git a/demo/waveform/run.ipynb b/demo/waveform/run.ipynb deleted file mode 100644 index 7462af3f2cc8b..0000000000000 --- a/demo/waveform/run.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: waveform"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "\n", 
"COLORS = [\n", " [\"#ff0000\", \"#00ff00\"],\n", " [\"#00ff00\", \"#0000ff\"],\n", " [\"#0000ff\", \"#ff0000\"],\n", "]\n", "\n", "def audio_waveform(audio, image):\n", " return (\n", " audio,\n", " gr.make_waveform(audio),\n", " gr.make_waveform(audio, animate=True),\n", " gr.make_waveform(audio, bg_image=image, bars_color=str(random.choice(COLORS))),\n", " )\n", "\n", "gr.Interface(\n", " audio_waveform,\n", " inputs=[gr.Audio(), gr.Image(type=\"filepath\")],\n", " outputs=[\n", " gr.Audio(),\n", " gr.Video(),\n", " gr.Video(),\n", " gr.Video(),\n", " ],\n", ").launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/waveform/run.py b/demo/waveform/run.py deleted file mode 100644 index 2292f8ad2dae2..0000000000000 --- a/demo/waveform/run.py +++ /dev/null @@ -1,27 +0,0 @@ -import gradio as gr -import random - -COLORS = [ - ["#ff0000", "#00ff00"], - ["#00ff00", "#0000ff"], - ["#0000ff", "#ff0000"], -] - -def audio_waveform(audio, image): - return ( - audio, - gr.make_waveform(audio), - gr.make_waveform(audio, animate=True), - gr.make_waveform(audio, bg_image=image, bars_color=str(random.choice(COLORS))), - ) - -gr.Interface( - audio_waveform, - inputs=[gr.Audio(), gr.Image(type="filepath")], - outputs=[ - gr.Audio(), - gr.Video(), - gr.Video(), - gr.Video(), - ], -).launch() diff --git a/demo/yolov10_webcam_stream/inference.py b/demo/yolov10_webcam_stream/inference.py new file mode 100644 index 0000000000000..2827cfece44da --- /dev/null +++ b/demo/yolov10_webcam_stream/inference.py @@ -0,0 +1,148 @@ +import time +import cv2 +import numpy as np +import onnxruntime # type: ignore + +from utils import draw_detections # type: ignore + + +class YOLOv10: + def __init__(self, path): + # Initialize model + self.initialize_model(path) + + def __call__(self, image): + return self.detect_objects(image) + + def initialize_model(self, path): + self.session = onnxruntime.InferenceSession( + path, 
providers=onnxruntime.get_available_providers() + ) + # Get model info + self.get_input_details() + self.get_output_details() + + def detect_objects(self, image, conf_threshold=0.3): + input_tensor = self.prepare_input(image) + + # Perform inference on the image + new_image = self.inference(image, input_tensor, conf_threshold) + + return new_image + + def prepare_input(self, image): + self.img_height, self.img_width = image.shape[:2] + + input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + # Resize input image + input_img = cv2.resize(input_img, (self.input_width, self.input_height)) + + # Scale input pixel values to 0 to 1 + input_img = input_img / 255.0 + input_img = input_img.transpose(2, 0, 1) + input_tensor = input_img[np.newaxis, :, :, :].astype(np.float32) + + return input_tensor + + def inference(self, image, input_tensor, conf_threshold=0.3): + start = time.perf_counter() + outputs = self.session.run( + self.output_names, {self.input_names[0]: input_tensor} + ) + + print(f"Inference time: {(time.perf_counter() - start)*1000:.2f} ms") + ( + boxes, + scores, + class_ids, + ) = self.process_output(outputs, conf_threshold) + return self.draw_detections(image, boxes, scores, class_ids) + + def process_output(self, output, conf_threshold=0.3): + predictions = np.squeeze(output[0]) + + # Filter out object confidence scores below threshold + scores = predictions[:, 4] + predictions = predictions[scores > conf_threshold, :] + scores = scores[scores > conf_threshold] + + if len(scores) == 0: + return [], [], [] + + # Get the class with the highest confidence + class_ids = predictions[:, 5].astype(int) + + # Get bounding boxes for each object + boxes = self.extract_boxes(predictions) + + return boxes, scores, class_ids + + def extract_boxes(self, predictions): + # Extract boxes from predictions + boxes = predictions[:, :4] + + # Scale boxes to original image dimensions + boxes = self.rescale_boxes(boxes) + + # Convert boxes to xyxy format + # boxes = 
xywh2xyxy(boxes) + + return boxes + + def rescale_boxes(self, boxes): + # Rescale boxes to original image dimensions + input_shape = np.array( + [self.input_width, self.input_height, self.input_width, self.input_height] + ) + boxes = np.divide(boxes, input_shape, dtype=np.float32) + boxes *= np.array( + [self.img_width, self.img_height, self.img_width, self.img_height] + ) + return boxes + + def draw_detections( + self, image, boxes, scores, class_ids, draw_scores=True, mask_alpha=0.4 + ): + return draw_detections(image, boxes, scores, class_ids, mask_alpha) + + def get_input_details(self): + model_inputs = self.session.get_inputs() + self.input_names = [model_inputs[i].name for i in range(len(model_inputs))] + + self.input_shape = model_inputs[0].shape + self.input_height = self.input_shape[2] + self.input_width = self.input_shape[3] + + def get_output_details(self): + model_outputs = self.session.get_outputs() + self.output_names = [model_outputs[i].name for i in range(len(model_outputs))] + + +if __name__ == "__main__": + import requests + import tempfile + from huggingface_hub import hf_hub_download + + model_file = hf_hub_download( + repo_id="onnx-community/yolov10s", filename="onnx/model.onnx" + ) + + yolov8_detector = YOLOv10(model_file) + + with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f: + f.write( + requests.get( + "https://live.staticflickr.com/13/19041780_d6fd803de0_3k.jpg" + ).content + ) + f.seek(0) + img = cv2.imread(f.name) + + # # Detect Objects + combined_image = yolov8_detector.detect_objects(img) + + # Draw detections + cv2.namedWindow("Output", cv2.WINDOW_NORMAL) + cv2.imshow("Output", combined_image) + cv2.waitKey(0) diff --git a/demo/yolov10_webcam_stream/requirements.txt b/demo/yolov10_webcam_stream/requirements.txt new file mode 100644 index 0000000000000..9766bb4676a1a --- /dev/null +++ b/demo/yolov10_webcam_stream/requirements.txt @@ -0,0 +1,6 @@ +safetensors==0.4.3 +opencv-python +twilio +gradio>=5.0,<6.0 
+gradio-webrtc==0.0.1 +onnxruntime-gpu \ No newline at end of file diff --git a/demo/yolov10_webcam_stream/run.ipynb b/demo/yolov10_webcam_stream/run.ipynb new file mode 100644 index 0000000000000..43759190962ee --- /dev/null +++ b/demo/yolov10_webcam_stream/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: yolov10_webcam_stream"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio safetensors==0.4.3 opencv-python twilio gradio>=5.0,<6.0 gradio-webrtc==0.0.1 onnxruntime-gpu"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/yolov10_webcam_stream/inference.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/yolov10_webcam_stream/utils.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import cv2\n", "from huggingface_hub import hf_hub_download\n", "from gradio_webrtc import WebRTC # type: ignore\n", "from twilio.rest import Client # type: ignore\n", "import os\n", "from inference import YOLOv10 # type: ignore\n", "\n", "model_file = hf_hub_download(\n", " repo_id=\"onnx-community/yolov10n\", filename=\"onnx/model.onnx\"\n", ")\n", "\n", "model = YOLOv10(model_file)\n", "\n", "account_sid = os.environ.get(\"TWILIO_ACCOUNT_SID\")\n", "auth_token = os.environ.get(\"TWILIO_AUTH_TOKEN\")\n", "\n", "if account_sid and auth_token:\n", " client = Client(account_sid, auth_token)\n", "\n", " token = client.tokens.create()\n", "\n", " rtc_configuration = {\n", " \"iceServers\": token.ice_servers,\n", " \"iceTransportPolicy\": 
\"relay\",\n", " }\n", "else:\n", " rtc_configuration = None\n", "\n", "\n", "def detection(image, conf_threshold=0.3):\n", " image = cv2.resize(image, (model.input_width, model.input_height))\n", " new_image = model.detect_objects(image, conf_threshold)\n", " return cv2.resize(new_image, (500, 500))\n", "\n", "\n", "css = \"\"\".my-group {max-width: 600px !important; max-height: 600 !important;}\n", " .my-column {display: flex !important; justify-content: center !important; align-items: center !important};\"\"\"\n", "\n", "\n", "with gr.Blocks(css=css) as demo:\n", " gr.HTML(\n", " \"\"\"\n", "

\n", " YOLOv10 Webcam Stream (Powered by WebRTC \u26a1\ufe0f)\n", "

\n", " \"\"\"\n", " )\n", " gr.HTML(\n", " \"\"\"\n", "

\n", " arXiv | github\n", "

\n", " \"\"\"\n", " )\n", " with gr.Column(elem_classes=[\"my-column\"]):\n", " with gr.Group(elem_classes=[\"my-group\"]):\n", " image = WebRTC(label=\"Stream\", rtc_configuration=rtc_configuration)\n", " conf_threshold = gr.Slider(\n", " label=\"Confidence Threshold\",\n", " minimum=0.0,\n", " maximum=1.0,\n", " step=0.05,\n", " value=0.30,\n", " )\n", "\n", " image.stream(\n", " fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/yolov10_webcam_stream/run.py b/demo/yolov10_webcam_stream/run.py new file mode 100644 index 0000000000000..8080322605e91 --- /dev/null +++ b/demo/yolov10_webcam_stream/run.py @@ -0,0 +1,72 @@ +import gradio as gr +import cv2 +from huggingface_hub import hf_hub_download +from gradio_webrtc import WebRTC # type: ignore +from twilio.rest import Client # type: ignore +import os +from inference import YOLOv10 # type: ignore + +model_file = hf_hub_download( + repo_id="onnx-community/yolov10n", filename="onnx/model.onnx" +) + +model = YOLOv10(model_file) + +account_sid = os.environ.get("TWILIO_ACCOUNT_SID") +auth_token = os.environ.get("TWILIO_AUTH_TOKEN") + +if account_sid and auth_token: + client = Client(account_sid, auth_token) + + token = client.tokens.create() + + rtc_configuration = { + "iceServers": token.ice_servers, + "iceTransportPolicy": "relay", + } +else: + rtc_configuration = None + + +def detection(image, conf_threshold=0.3): + image = cv2.resize(image, (model.input_width, model.input_height)) + new_image = model.detect_objects(image, conf_threshold) + return cv2.resize(new_image, (500, 500)) + + +css = """.my-group {max-width: 600px !important; max-height: 600 !important;} + .my-column {display: flex !important; justify-content: center !important; align-items: center !important};""" + + +with gr.Blocks(css=css) as demo: + gr.HTML( + 
""" +

+ YOLOv10 Webcam Stream (Powered by WebRTC ⚡️) +

+ """ + ) + gr.HTML( + """ +

+ arXiv | github +

+ """ + ) + with gr.Column(elem_classes=["my-column"]): + with gr.Group(elem_classes=["my-group"]): + image = WebRTC(label="Stream", rtc_configuration=rtc_configuration) + conf_threshold = gr.Slider( + label="Confidence Threshold", + minimum=0.0, + maximum=1.0, + step=0.05, + value=0.30, + ) + + image.stream( + fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10 + ) + +if __name__ == "__main__": + demo.launch() diff --git a/demo/yolov10_webcam_stream/utils.py b/demo/yolov10_webcam_stream/utils.py new file mode 100644 index 0000000000000..281656746abce --- /dev/null +++ b/demo/yolov10_webcam_stream/utils.py @@ -0,0 +1,237 @@ +import numpy as np +import cv2 + +class_names = [ + "person", + "bicycle", + "car", + "motorcycle", + "airplane", + "bus", + "train", + "truck", + "boat", + "traffic light", + "fire hydrant", + "stop sign", + "parking meter", + "bench", + "bird", + "cat", + "dog", + "horse", + "sheep", + "cow", + "elephant", + "bear", + "zebra", + "giraffe", + "backpack", + "umbrella", + "handbag", + "tie", + "suitcase", + "frisbee", + "skis", + "snowboard", + "sports ball", + "kite", + "baseball bat", + "baseball glove", + "skateboard", + "surfboard", + "tennis racket", + "bottle", + "wine glass", + "cup", + "fork", + "knife", + "spoon", + "bowl", + "banana", + "apple", + "sandwich", + "orange", + "broccoli", + "carrot", + "hot dog", + "pizza", + "donut", + "cake", + "chair", + "couch", + "potted plant", + "bed", + "dining table", + "toilet", + "tv", + "laptop", + "mouse", + "remote", + "keyboard", + "cell phone", + "microwave", + "oven", + "toaster", + "sink", + "refrigerator", + "book", + "clock", + "vase", + "scissors", + "teddy bear", + "hair drier", + "toothbrush", +] + +# Create a list of colors for each class where each color is a tuple of 3 integer values +rng = np.random.default_rng(3) +colors = rng.uniform(0, 255, size=(len(class_names), 3)) + + +def nms(boxes, scores, iou_threshold): + # Sort by score + sorted_indices = 
np.argsort(scores)[::-1] + + keep_boxes = [] + while sorted_indices.size > 0: + # Pick the last box + box_id = sorted_indices[0] + keep_boxes.append(box_id) + + # Compute IoU of the picked box with the rest + ious = compute_iou(boxes[box_id, :], boxes[sorted_indices[1:], :]) + + # Remove boxes with IoU over the threshold + keep_indices = np.where(ious < iou_threshold)[0] + + # print(keep_indices.shape, sorted_indices.shape) + sorted_indices = sorted_indices[keep_indices + 1] + + return keep_boxes + + +def multiclass_nms(boxes, scores, class_ids, iou_threshold): + unique_class_ids = np.unique(class_ids) + + keep_boxes = [] + for class_id in unique_class_ids: + class_indices = np.where(class_ids == class_id)[0] + class_boxes = boxes[class_indices, :] + class_scores = scores[class_indices] + + class_keep_boxes = nms(class_boxes, class_scores, iou_threshold) + keep_boxes.extend(class_indices[class_keep_boxes]) + + return keep_boxes + + +def compute_iou(box, boxes): + # Compute xmin, ymin, xmax, ymax for both boxes + xmin = np.maximum(box[0], boxes[:, 0]) + ymin = np.maximum(box[1], boxes[:, 1]) + xmax = np.minimum(box[2], boxes[:, 2]) + ymax = np.minimum(box[3], boxes[:, 3]) + + # Compute intersection area + intersection_area = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin) + + # Compute union area + box_area = (box[2] - box[0]) * (box[3] - box[1]) + boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + union_area = box_area + boxes_area - intersection_area + + # Compute IoU + iou = intersection_area / union_area + + return iou + + +def xywh2xyxy(x): + # Convert bounding box (x, y, w, h) to bounding box (x1, y1, x2, y2) + y = np.copy(x) + y[..., 0] = x[..., 0] - x[..., 2] / 2 + y[..., 1] = x[..., 1] - x[..., 3] / 2 + y[..., 2] = x[..., 0] + x[..., 2] / 2 + y[..., 3] = x[..., 1] + x[..., 3] / 2 + return y + + +def draw_detections(image, boxes, scores, class_ids, mask_alpha=0.3): + det_img = image.copy() + + img_height, img_width = 
image.shape[:2] + font_size = min([img_height, img_width]) * 0.0006 + text_thickness = int(min([img_height, img_width]) * 0.001) + + # det_img = draw_masks(det_img, boxes, class_ids, mask_alpha) + + # Draw bounding boxes and labels of detections + for class_id, box, score in zip(class_ids, boxes, scores): + color = colors[class_id] + + draw_box(det_img, box, color) # type: ignore + + label = class_names[class_id] + caption = f"{label} {int(score * 100)}%" + draw_text(det_img, caption, box, color, font_size, text_thickness) # type: ignore + + return det_img + + +def draw_box( + image: np.ndarray, + box: np.ndarray, + color: tuple[int, int, int] = (0, 0, 255), + thickness: int = 2, +) -> np.ndarray: + x1, y1, x2, y2 = box.astype(int) + return cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness) + + +def draw_text( + image: np.ndarray, + text: str, + box: np.ndarray, + color: tuple[int, int, int] = (0, 0, 255), + font_size: float = 0.001, + text_thickness: int = 2, +) -> np.ndarray: + x1, y1, x2, y2 = box.astype(int) + (tw, th), _ = cv2.getTextSize( + text=text, + fontFace=cv2.FONT_HERSHEY_SIMPLEX, + fontScale=font_size, + thickness=text_thickness, + ) + th = int(th * 1.2) + + cv2.rectangle(image, (x1, y1), (x1 + tw, y1 - th), color, -1) + + return cv2.putText( + image, + text, + (x1, y1), + cv2.FONT_HERSHEY_SIMPLEX, + font_size, + (255, 255, 255), + text_thickness, + cv2.LINE_AA, + ) + + +def draw_masks( + image: np.ndarray, boxes: np.ndarray, classes: np.ndarray, mask_alpha: float = 0.3 +) -> np.ndarray: + mask_img = image.copy() + + # Draw bounding boxes and labels of detections + for box, class_id in zip(boxes, classes): + color = colors[class_id] + + x1, y1, x2, y2 = box.astype(int) + + # Draw fill rectangle in mask image + cv2.rectangle(mask_img, (x1, y1), (x2, y2), color, -1) # type: ignore + + return cv2.addWeighted(mask_img, mask_alpha, image, 1 - mask_alpha, 0) diff --git a/gradio/CHANGELOG.md b/gradio/CHANGELOG.md index 1973f9fdc6daa..9cee66c08936a 
100644 --- a/gradio/CHANGELOG.md +++ b/gradio/CHANGELOG.md @@ -1,5 +1,210 @@ # gradio +## 5.0.0-beta.10 + +### Fixes + +- [#9600](https://github.com/gradio-app/gradio/pull/9600) [`9f71086`](https://github.com/gradio-app/gradio/commit/9f71086036339bfdd14f3aab29729041a01fc2d4) - Ensure undo/try shows for final bot message in gr.Chatbot. Thanks @hannahblair! + +## 5.0.0-beta.9 + +### Features + +- [#9437](https://github.com/gradio-app/gradio/pull/9437) [`c3d93be`](https://github.com/gradio-app/gradio/commit/c3d93bef94b9401747a363f7bad88a1d347d535b) - Adding new themes to Gradio 5.0. Thanks @allisonwhilden! +- [#9593](https://github.com/gradio-app/gradio/pull/9593) [`cc61fe7`](https://github.com/gradio-app/gradio/commit/cc61fe7047ac61779a61cce52c666400b9517daa) - Some more chatbot fixes. Thanks @dawoodkhan82! +- [#9583](https://github.com/gradio-app/gradio/pull/9583) [`b92a762`](https://github.com/gradio-app/gradio/commit/b92a7623e1ebd801587041e1ccca058a61058da9) - Disable the submit button and enter-key submit when the text is empty. Thanks @whitphx! +- [#9590](https://github.com/gradio-app/gradio/pull/9590) [`e853c41`](https://github.com/gradio-app/gradio/commit/e853c413583d91186aef3aceb0849d0ec0494834) - SSR e2e + fixes. Thanks @pngwn! +- [#9591](https://github.com/gradio-app/gradio/pull/9591) [`139152f`](https://github.com/gradio-app/gradio/commit/139152fe18bfdc5644a59d7bdfec9891b644f0bf) - Equal height in row false by default. Thanks @aliabid94! +- [#9589](https://github.com/gradio-app/gradio/pull/9589) [`477f45c`](https://github.com/gradio-app/gradio/commit/477f45cb43be957684eb392e3d62c09490c22391) - Only move files to the cache that have a meta key. Thanks @freddyaboulton! +- [#9584](https://github.com/gradio-app/gradio/pull/9584) [`6f8fa54`](https://github.com/gradio-app/gradio/commit/6f8fa5405528ad684084e4c7facfe36624fe7784) - Chat Interface Multimodal Fix & Fallback to `gr.Examples()`. Thanks @dawoodkhan82! 
+- [#9482](https://github.com/gradio-app/gradio/pull/9482) [`bd6c5f2`](https://github.com/gradio-app/gradio/commit/bd6c5f237b0631d86273c7684c3bf2b1011992a3) - Fix custom component CLI on main/5.0. Thanks @freddyaboulton! +- [#9601](https://github.com/gradio-app/gradio/pull/9601) [`c078892`](https://github.com/gradio-app/gradio/commit/c07889223cb64661b17560b707b977248809470a) - Tweak gr.Dataframe menu UX. Thanks @hannahblair! +- [#9575](https://github.com/gradio-app/gradio/pull/9575) [`4ec2feb`](https://github.com/gradio-app/gradio/commit/4ec2feb04e452d2c77482c09543c59948567be67) - Update gr.Dataframe UI with action popover. Thanks @hannahblair! +- [#9582](https://github.com/gradio-app/gradio/pull/9582) [`43a7f42`](https://github.com/gradio-app/gradio/commit/43a7f420d8ac34c7f7fa71d6e630a4c8618d3780) - Chatbot autoscroll. Thanks @whitphx! +- [#9598](https://github.com/gradio-app/gradio/pull/9598) [`ffc33fa`](https://github.com/gradio-app/gradio/commit/ffc33facaec1bcc92add5892afb86b7b5ba037d4) - Fix markdown code copy/check button in gr.Chatbot. Thanks @hannahblair! +- [#9576](https://github.com/gradio-app/gradio/pull/9576) [`430a26a`](https://github.com/gradio-app/gradio/commit/430a26a4fbcbabb5e9ddb6173bf658a00960e88e) - Fix reload mode. Thanks @freddyaboulton! +- [#9580](https://github.com/gradio-app/gradio/pull/9580) [`a9ac396`](https://github.com/gradio-app/gradio/commit/a9ac396f19218eafc441b7614289b4828cde853d) - Deep equal check with hash. Thanks @aliabid94! +- [#9499](https://github.com/gradio-app/gradio/pull/9499) [`17e6c84`](https://github.com/gradio-app/gradio/commit/17e6c84d6b11651cd03c1d47caec85de62030ea0) - Fix `gr.Chatbot` panels layout. Thanks @hannahblair! +- [#9592](https://github.com/gradio-app/gradio/pull/9592) [`24fe222`](https://github.com/gradio-app/gradio/commit/24fe222fd17583d04dd31aebf60b649224e8382f) - Fix favicon in ssr mode. Thanks @freddyaboulton! 
+ +## 5.0.0-beta.8 + +### Features + +- [#9550](https://github.com/gradio-app/gradio/pull/9550) [`b0fedd7`](https://github.com/gradio-app/gradio/commit/b0fedd7ef718c0df797ec277db7e773543a70a4d) - Fix most flaky Python tests in `5.0-dev` branch. Thanks @abidlabs! +- [#9577](https://github.com/gradio-app/gradio/pull/9577) [`9f532e0`](https://github.com/gradio-app/gradio/commit/9f532e03a6b91b9a5592152c19b9b2611774cae7) - Equal height columns. Thanks @aliabid94! +- [#9570](https://github.com/gradio-app/gradio/pull/9570) [`e0ee3d5`](https://github.com/gradio-app/gradio/commit/e0ee3d5bb1020744cefa1abf66009fbf07da8cbe) - Update gr.ColorPicker UI. Thanks @hannahblair! +- [#9483](https://github.com/gradio-app/gradio/pull/9483) [`8dc7c12`](https://github.com/gradio-app/gradio/commit/8dc7c12389311b60efcde1b9d3e3668a34d2dc00) - Send Streaming data over Websocket if possible. Also support base64 output format for images. Thanks @freddyaboulton! +- [#9521](https://github.com/gradio-app/gradio/pull/9521) [`06ef22e`](https://github.com/gradio-app/gradio/commit/06ef22e83cdd27e7afb381396d153d9db3dea16e) - Allow `info=` to render markdown. Thanks @dawoodkhan82! +- [#9571](https://github.com/gradio-app/gradio/pull/9571) [`148345d`](https://github.com/gradio-app/gradio/commit/148345d107763754710505281ad70368ebc6f3ec) - Fix chatinterface embedding height issues. Thanks @aliabid94! +- [#9525](https://github.com/gradio-app/gradio/pull/9525) [`7c367b6`](https://github.com/gradio-app/gradio/commit/7c367b6cf0472d478671b7c7476e892b4c61c812) - Fix cut off in gr.ImageEditor. Thanks @hannahblair! +- [#9522](https://github.com/gradio-app/gradio/pull/9522) [`3b71ed2`](https://github.com/gradio-app/gradio/commit/3b71ed21b7e2ecb67eb68fb946d25565169cb4df) - Api info fix. Thanks @freddyaboulton! 
+- [#9508](https://github.com/gradio-app/gradio/pull/9508) [`b260389`](https://github.com/gradio-app/gradio/commit/b26038932a64f024bd149a56b1539e9e75802f29) - Change caching to occur not at the creation of a `gr.Examples()` but when the Blocks is actually launched. Thanks @aliabid94! +- [#9524](https://github.com/gradio-app/gradio/pull/9524) [`cf39640`](https://github.com/gradio-app/gradio/commit/cf396404ec8052c7fbf5f045955028bc54a274a5) - Add `css_paths` and `head_paths` parameters. Thanks @abidlabs! + +## 5.0.0-beta.7 + +### Features + +- [#9546](https://github.com/gradio-app/gradio/pull/9546) [`b82aa6f`](https://github.com/gradio-app/gradio/commit/b82aa6f115f6ad9a9690bdecb0fe63c162dc75c0) - Disable sagemaker_check() for now. Thanks @vmatt! +- [#9545](https://github.com/gradio-app/gradio/pull/9545) [`098a009`](https://github.com/gradio-app/gradio/commit/098a009c05243967bd9e5acc28864eb40a135f6a) - Add Jinja2 language to Code component. Thanks @CISC! +- [#9526](https://github.com/gradio-app/gradio/pull/9526) [`f60bb68`](https://github.com/gradio-app/gradio/commit/f60bb68f52cd0863d9087b3fbc56635e297adef1) - Fix single select dropdown. Thanks @whitphx! +- [#9497](https://github.com/gradio-app/gradio/pull/9497) [`d826faa`](https://github.com/gradio-app/gradio/commit/d826faa8c2584cf0772d4e66b4073e33b83f3a00) - Hide x axis labels. Thanks @aliabid94! + +## 5.0.0-beta.6 + +### Features + +- [#9460](https://github.com/gradio-app/gradio/pull/9460) [`7352a89`](https://github.com/gradio-app/gradio/commit/7352a89722da91461c32fd33588531f3edce9c48) - Playground requirements tab. Thanks @whitphx! +- [#9496](https://github.com/gradio-app/gradio/pull/9496) [`1647ebd`](https://github.com/gradio-app/gradio/commit/1647ebddc3e2ed6fc143a62629409e32afcc5801) - UI theme fixes. Thanks @aliabid94! +- [#9450](https://github.com/gradio-app/gradio/pull/9450) [`991883e`](https://github.com/gradio-app/gradio/commit/991883e217dc0a3512b3ae3245378812f373b8db) - Improve `gr.Code`. 
Thanks @hannahblair! +- [#9504](https://github.com/gradio-app/gradio/pull/9504) [`d054262`](https://github.com/gradio-app/gradio/commit/d054262f611d5f1eb1a1c936db7152347a891f8e) - Centre components within `Block` when height and width are set. Thanks @hannahblair! +- [#9481](https://github.com/gradio-app/gradio/pull/9481) [`2510a6e`](https://github.com/gradio-app/gradio/commit/2510a6e978a49432d7820e9518f164a70cf8acc8) - Fix `slider-color` var. Thanks @hannahblair! +- [#9495](https://github.com/gradio-app/gradio/pull/9495) [`488ef76`](https://github.com/gradio-app/gradio/commit/488ef768ccc5008401f7e0aa4c357b93311190ff) - Fix custom component CLI unit tests. Thanks @freddyaboulton! +- [#9488](https://github.com/gradio-app/gradio/pull/9488) [`4e6a47f`](https://github.com/gradio-app/gradio/commit/4e6a47f5a29cb885d5bc01a79ca4cc45d298f0b1) - Fixes: Chatbot examples for custom chatbot + rename `suggestions` -> `examples`. Thanks @dawoodkhan82! +- [#9506](https://github.com/gradio-app/gradio/pull/9506) [`861f5e9`](https://github.com/gradio-app/gradio/commit/861f5e97ffde5f59e42cfa213364f19e84d799fd) - Fix node process to run with correct server name. Thanks @abidlabs! +- [#9493](https://github.com/gradio-app/gradio/pull/9493) [`c307a0c`](https://github.com/gradio-app/gradio/commit/c307a0c9b81b66bde21f0af4a9f7d5726ea7a30d) - Minor fixes to docs and a demo. Thanks @abidlabs! +- [#9519](https://github.com/gradio-app/gradio/pull/9519) [`0ab6ac5`](https://github.com/gradio-app/gradio/commit/0ab6ac5dc01b69e4f2462d00c4910f3354441227) - Fix change triggers for dropdown and radio. Thanks @dawoodkhan82! + +### Fixes + +- [#9431](https://github.com/gradio-app/gradio/pull/9431) [`7065e11`](https://github.com/gradio-app/gradio/commit/7065e11e465fcdfe14688bd6ca2aeed0a25fcc36) - Check for `file_types` parameter in the backend. Thanks @dawoodkhan82! 
+ +## 5.0.0-beta.5 + +### Features + +- [#9470](https://github.com/gradio-app/gradio/pull/9470) [`b406139`](https://github.com/gradio-app/gradio/commit/b40613928c0e0d224ff2b4db5d0b45727c178560) - Add support for 3rd party providers to `gr.load`, and provide a better UX for conversational models. Thanks @abidlabs! +- [#9383](https://github.com/gradio-app/gradio/pull/9383) [`30d13ac`](https://github.com/gradio-app/gradio/commit/30d13ac8a932d53abfd236d3e4e845570480e701) - Pre/post-processing download requests. Thanks @aliabid94! +- [#9464](https://github.com/gradio-app/gradio/pull/9464) [`3ac5d9c`](https://github.com/gradio-app/gradio/commit/3ac5d9c972576d82bc365a6532e6e12f55441a30) - Fix plots. Thanks @pngwn! + +## 5.0.0-beta.4 + +### Features + +- [#9419](https://github.com/gradio-app/gradio/pull/9419) [`018c140`](https://github.com/gradio-app/gradio/commit/018c140ef86cacc8211df05b57b26924dab7fa08) - Start/stop recording from the backend. Add guide on conversational chatbots. Thanks @freddyaboulton! +- [#9453](https://github.com/gradio-app/gradio/pull/9453) [`56dbf77`](https://github.com/gradio-app/gradio/commit/56dbf77671012015efd3c745bc33e5074ab7158f) - Chatbot bug fixes. Thanks @dawoodkhan82! +- [#9448](https://github.com/gradio-app/gradio/pull/9448) [`e7a415b`](https://github.com/gradio-app/gradio/commit/e7a415b1ef923d31754b302b3cd8b5e71a9ea446) - Use `str` or `pathlib.Path` objects to indicate filepaths for `css`, `js`, and `head` parameters. Thanks @abidlabs! +- [#9469](https://github.com/gradio-app/gradio/pull/9469) [`f7c3396`](https://github.com/gradio-app/gradio/commit/f7c3396f55a5b8364d3880a29d766bd092d7f840) - Fix: Triggered dataframe change event for header change. Thanks @Joodith! +- [#9447](https://github.com/gradio-app/gradio/pull/9447) [`afbd8e7`](https://github.com/gradio-app/gradio/commit/afbd8e7a2faadeea5a431f67a753dea14dd5829f) - Reduce analytics that are collected. Thanks @abidlabs! 
+- [#9438](https://github.com/gradio-app/gradio/pull/9438) [`8f469e1`](https://github.com/gradio-app/gradio/commit/8f469e1d1d9d1636e4dedfb1c09e76a5e2ba8d4e) - Small changes to caching. Thanks @abidlabs! +- [#9446](https://github.com/gradio-app/gradio/pull/9446) [`0c8fafb`](https://github.com/gradio-app/gradio/commit/0c8fafb31df7ef3ef5812d6efb47ca342a3bad3c) - Fix SSR mode flag with `mount_gradio_app` and revert changes to pytests. Thanks @abidlabs! +- [#9456](https://github.com/gradio-app/gradio/pull/9456) [`4d75f02`](https://github.com/gradio-app/gradio/commit/4d75f029aacf0561f50c7afebbe8d54da2cb0af4) - Update object detection guide. Thanks @freddyaboulton! +- [#9406](https://github.com/gradio-app/gradio/pull/9406) [`74f3b9d`](https://github.com/gradio-app/gradio/commit/74f3b9ded1ce0ff6f2f0dbcb113edd64b7bb8f60) - Allow skipping an arbitrary number of output components, and also raise a warning if the number of output components does not match the number of values returned from a function. Thanks @abidlabs! +- [#9413](https://github.com/gradio-app/gradio/pull/9413) [`a16787a`](https://github.com/gradio-app/gradio/commit/a16787abdc8d507d75b986dbe882d21be4540908) - Lite: HTTPX client improvement. Thanks @whitphx! + +## 5.0.0-beta.3 + +### Features + +- [#9376](https://github.com/gradio-app/gradio/pull/9376) [`d92c26f`](https://github.com/gradio-app/gradio/commit/d92c26fe63f6b88e16c356cb84c55b61f795db73) - Small fixes to `gr.Dataframe` and chatbot docs. Thanks @abidlabs! +- [#9412](https://github.com/gradio-app/gradio/pull/9412) [`c2c2fd9`](https://github.com/gradio-app/gradio/commit/c2c2fd989348f826566773c07c0e0bda200199ff) - fix SSR apps on spaces. Thanks @pngwn! + +### Fixes + +- [#9405](https://github.com/gradio-app/gradio/pull/9405) [`bf27ff4`](https://github.com/gradio-app/gradio/commit/bf27ff4ac8ada33ea03dd26d5c1c1115aa1f318a) - Center icon in button when no text is present. Thanks @abidlabs! 
+ +## 5.0.0-beta.2 + +### Features + +- [#9359](https://github.com/gradio-app/gradio/pull/9359) [`50c3a7f`](https://github.com/gradio-app/gradio/commit/50c3a7f1541f632853a96f3d979ebeef6ad82869) - Small tweak to how thoughts are shown in `gr.Chatbot`. Thanks @abidlabs! +- [#9323](https://github.com/gradio-app/gradio/pull/9323) [`06babda`](https://github.com/gradio-app/gradio/commit/06babda0395fd3fbd323c1c3cb33704ecfd6deb0) - Disable liking user message in chatbot by default but make it configurable. Thanks @freddyaboulton! +- [#8966](https://github.com/gradio-app/gradio/pull/8966) [`8e52b6a`](https://github.com/gradio-app/gradio/commit/8e52b6a3e75957462bc7fdbf6ff9c280084d5f08) - Chatbot Examples. Thanks @dawoodkhan82! +- [#9261](https://github.com/gradio-app/gradio/pull/9261) [`73647a0`](https://github.com/gradio-app/gradio/commit/73647a07b0439efabe3dd218ff6c366ffa3b84a0) - Move icons into `IconButtonWrapper`. Thanks @hannahblair! +- [#9316](https://github.com/gradio-app/gradio/pull/9316) [`4338f29`](https://github.com/gradio-app/gradio/commit/4338f29bce2430d765f20070d1823ecc19d940cb) - 9227 chatinterface retry bug. Thanks @freddyaboulton! +- [#9313](https://github.com/gradio-app/gradio/pull/9313) [`1fef9d9`](https://github.com/gradio-app/gradio/commit/1fef9d9a26f0ebce4de18c486702661f6539b1c6) - Standardize `height` across components and add `max_height` and `min_height` parameters where appropriate. Thanks @abidlabs! +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Ssr part 2. Thanks @pngwn! +- [#9250](https://github.com/gradio-app/gradio/pull/9250) [`350b0a5`](https://github.com/gradio-app/gradio/commit/350b0a5cafb9176f914f62e7c90de51d4352cc77) - Improve Icon Button consistency. Thanks @hannahblair! 
+- [#9269](https://github.com/gradio-app/gradio/pull/9269) [`e05f568`](https://github.com/gradio-app/gradio/commit/e05f568f47e9fa33ef91dbbe5cc477d32762bc36) - Fix reload mode and streaming in 5.0 dev. Thanks @freddyaboulton! +- [#9356](https://github.com/gradio-app/gradio/pull/9356) [`1daf259`](https://github.com/gradio-app/gradio/commit/1daf259b52d0b1ce16d916ff25a15d322b51ecf5) - Use `container` param in `gr.Markdown`. Thanks @hannahblair! +- [#9321](https://github.com/gradio-app/gradio/pull/9321) [`81a356d`](https://github.com/gradio-app/gradio/commit/81a356d802f95b6a9a7aeb3759e05e47febbd0d3) - Remove two dependencies: `importlib_resources` and `urllib3` (if not in Wasm). Thanks @abidlabs! +- [#9253](https://github.com/gradio-app/gradio/pull/9253) [`99648ec`](https://github.com/gradio-app/gradio/commit/99648ec7c4443e74799941e47b0015ac9ca581e1) - Adds ability to block event trigger when file is uploading. Thanks @dawoodkhan82! +- [#9341](https://github.com/gradio-app/gradio/pull/9341) [`02369b3`](https://github.com/gradio-app/gradio/commit/02369b3159df72b2f4a36ce5684574eb65065731) - Improve is_in_or_equal and fuzzer. Thanks @freddyaboulton! +- [#9333](https://github.com/gradio-app/gradio/pull/9333) [`5b86e2f`](https://github.com/gradio-app/gradio/commit/5b86e2f2a2bedcde79e425fd470473bc1fd6ae2e) - Enhance Lite E2E tests and fix a networking problem on Lite. Thanks @whitphx! +- [#9338](https://github.com/gradio-app/gradio/pull/9338) [`19f6b31`](https://github.com/gradio-app/gradio/commit/19f6b31a73c6114093cbb5a7e69131175efa8a79) - Fix typo in `tunneling.py`. Thanks @abidlabs! +- [#9336](https://github.com/gradio-app/gradio/pull/9336) [`736046f`](https://github.com/gradio-app/gradio/commit/736046f17db073b56023b5e077b0ae5ae4adeb02) - Object Detection From Webcam Stream Guide. Thanks @freddyaboulton! 
+- [#9300](https://github.com/gradio-app/gradio/pull/9300) [`6309a48`](https://github.com/gradio-app/gradio/commit/6309a48e3a89a13137ec9d61c1c722eb59b8e3dc) - Raise ChecksumMismatchError. Thanks @abidlabs! +- [#9373](https://github.com/gradio-app/gradio/pull/9373) [`6443062`](https://github.com/gradio-app/gradio/commit/64430620449ab5b19ea32b02ab82a2d1804dcb2e) - Fix Cached Examples for Streamed Media. Thanks @freddyaboulton! +- [#9367](https://github.com/gradio-app/gradio/pull/9367) [`1c94328`](https://github.com/gradio-app/gradio/commit/1c94328cfe6ce0676c3850f5e9da5bcabf9ee570) - add local fonts and update themes. Thanks @hannahblair! +- [#9335](https://github.com/gradio-app/gradio/pull/9335) [`b543465`](https://github.com/gradio-app/gradio/commit/b543465d06d7d1b399c4d0755da05e022611a97f) - Remove lite/theme.css from the Git-managed file tree. Thanks @whitphx! +- [#9358](https://github.com/gradio-app/gradio/pull/9358) [`16c0485`](https://github.com/gradio-app/gradio/commit/16c0485a32be324a5f1c7252f5ce09fff79f7d67) - Small tweaks to improve the DX for the "tuples"/"messages" argument in `gr.Chatbot`. Thanks @abidlabs! +- [#9303](https://github.com/gradio-app/gradio/pull/9303) [`34f46b0`](https://github.com/gradio-app/gradio/commit/34f46b0512fe30b4db9c9901cb23987d3cecc48d) - Dont move files to cache automatically in chatbot postprocess. Thanks @freddyaboulton! +- [#9363](https://github.com/gradio-app/gradio/pull/9363) [`3ad28c7`](https://github.com/gradio-app/gradio/commit/3ad28c7e310e8589e0c53b7efee8031e129bece8) - Prevent HTML and Markdown height changing when status is hidden. Thanks @hannahblair! +- [#9260](https://github.com/gradio-app/gradio/pull/9260) [`d47dd1f`](https://github.com/gradio-app/gradio/commit/d47dd1f8417a878ef731d2eeabf60f3069289dee) - Fix overflowing markdown in Chatbot. Thanks @hannahblair! 
+- [#9320](https://github.com/gradio-app/gradio/pull/9320) [`98cbcae`](https://github.com/gradio-app/gradio/commit/98cbcaef827de7267462ccba180c7b2ffb1e825d) - chore: fix docs style. Thanks @imba-tjd! +- [#9314](https://github.com/gradio-app/gradio/pull/9314) [`299879d`](https://github.com/gradio-app/gradio/commit/299879d02adf3bacb012c76a467aaf5df5b31493) - Make `gr.Image` preprocessing more efficient. Thanks @abidlabs! +- [#9371](https://github.com/gradio-app/gradio/pull/9371) [`7bf3e99`](https://github.com/gradio-app/gradio/commit/7bf3e9989392b7edcdc18c1d840fb8130b15040e) - Fix `gr.ImageEditor` toolbar cutoff. Thanks @hannahblair! +- [#9306](https://github.com/gradio-app/gradio/pull/9306) [`f3f0fef`](https://github.com/gradio-app/gradio/commit/f3f0fef199c7779aac9aaef794dd4af1861ce50f) - Fixes race condition in `update_root_in_config`. Thanks @abidlabs! +- [#9312](https://github.com/gradio-app/gradio/pull/9312) [`7c0780b`](https://github.com/gradio-app/gradio/commit/7c0780b5677f8a1c05b9d2eee136e982917829b8) - Proposal: remove `gr.make_waveform` and remove `matplotlib` as a dependency. Thanks @abidlabs! +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Tweaks to SSR mode. Thanks @pngwn! +- [#9270](https://github.com/gradio-app/gradio/pull/9270) [`b0b8500`](https://github.com/gradio-app/gradio/commit/b0b850081d8d10c1287b5d179b8db37482e21c8d) - Fix stop recording button colors. Thanks @freddyaboulton! +- [#9268](https://github.com/gradio-app/gradio/pull/9268) [`c469d40`](https://github.com/gradio-app/gradio/commit/c469d40b0d9d807abb9fa92c67069c08833ce6bc) - Raise error instead of warning if checksums for binary do not match. Thanks @abidlabs! +- [#9377](https://github.com/gradio-app/gradio/pull/9377) [`618e9fe`](https://github.com/gradio-app/gradio/commit/618e9fe941744cef4a4a06eb7840763c64b15e32) - Update babylon.js to `v7` for `gr.Model3D`. Thanks @abidlabs! 
+- [#9282](https://github.com/gradio-app/gradio/pull/9282) [`54ea485`](https://github.com/gradio-app/gradio/commit/54ea485ba92165be96137ae35e2d3f2fc62a2873) - Further tweak to is_in_or_equal. Thanks @freddyaboulton! +- [#9326](https://github.com/gradio-app/gradio/pull/9326) [`7afb9a1`](https://github.com/gradio-app/gradio/commit/7afb9a14fa64310eb8b70f43a3bad373e46e36c1) - 5.0 merge take 2. Thanks @pngwn! +- [#9280](https://github.com/gradio-app/gradio/pull/9280) [`7122420`](https://github.com/gradio-app/gradio/commit/712242047fde3a594dfde7f48a44c7ea16239dc8) - Match style of textbox stop button to submit button. Thanks @freddyaboulton! +- [#9348](https://github.com/gradio-app/gradio/pull/9348) [`61f794b`](https://github.com/gradio-app/gradio/commit/61f794bba78ef59e55beca0ba743548f33f3a3c3) - Do not attach `content_disposition_type = "attachment"` headers for files explicitly allowed by developer. Thanks @abidlabs! +- [#9361](https://github.com/gradio-app/gradio/pull/9361) [`5eb860f`](https://github.com/gradio-app/gradio/commit/5eb860f739a187217ded1fc569676e0edd16bab0) - Refactor lazy caching. Thanks @abidlabs! +- [#9311](https://github.com/gradio-app/gradio/pull/9311) [`c4afdcd`](https://github.com/gradio-app/gradio/commit/c4afdcdb1f1f80c5f95ab45d527236e9364ace82) - Added max lines and overflow scrollbar for `gr.Code`. Thanks @micpst! + +### Fixes + +- [#9299](https://github.com/gradio-app/gradio/pull/9299) [`aa35b07`](https://github.com/gradio-app/gradio/commit/aa35b0788e613fdd45446d267513e6f94fa208ea) - Trigger state change event on iterators. Thanks @freddyaboulton! +- [#9393](https://github.com/gradio-app/gradio/pull/9393) [`53ed0f0`](https://github.com/gradio-app/gradio/commit/53ed0f030551ad876a1fea28a9db9015ba6ec33e) - Fix File Types for MultimodalTextbox. Thanks @dawoodkhan82! 
+- [#9328](https://github.com/gradio-app/gradio/pull/9328) [`6a7f631`](https://github.com/gradio-app/gradio/commit/6a7f63180b4105622298dd742d6a0d25216ea629) - Set the color of placeholder in a disabled textbox to gray instead of black, and disable typing while a response is generating in `gr.ChatInterface`, allow `gr.MultimodalTextbox` to accept string values. Thanks @abidlabs! + +## 5.0.0-beta.1 + +### Features + +- [#9235](https://github.com/gradio-app/gradio/pull/9235) [`f8b411f`](https://github.com/gradio-app/gradio/commit/f8b411fe282ff0316ed4abebc0a043b044bf4dd9) - Built-in submit and stop buttons in `gr.ChatInterface(multimodal=False)`, adding `submit_btn` and `stop_btn` props to `gr.Textbox()` and `gr.MultimodalText()`. Thanks @whitphx! +- [#9201](https://github.com/gradio-app/gradio/pull/9201) [`5492e74`](https://github.com/gradio-app/gradio/commit/5492e742b1f1fa618208cce523f50ad22a6e86f1) - Move buttons from chat_interface into Chatbot. Thanks @freddyaboulton! +- [#9199](https://github.com/gradio-app/gradio/pull/9199) [`3175c7a`](https://github.com/gradio-app/gradio/commit/3175c7aebc6ad2466d31d6949580f5a3cb4cd698) - Redesign `gr.Tabs()`. Thanks @hannahblair! +- [#9167](https://github.com/gradio-app/gradio/pull/9167) [`e9e737e`](https://github.com/gradio-app/gradio/commit/e9e737eeeb61d0bbf43277c75b6ffed8b34aa445) - Redesign `gr.Button()`. Thanks @hannahblair! +- [#9218](https://github.com/gradio-app/gradio/pull/9218) [`4a832f4`](https://github.com/gradio-app/gradio/commit/4a832f4b0a8f35a10bc2301a56b711519e85034b) - Adds TLS to FRP tunnel. Thanks @abidlabs! +- [#9166](https://github.com/gradio-app/gradio/pull/9166) [`8a75559`](https://github.com/gradio-app/gradio/commit/8a755596317c59bfb10803edc0f8642e62d7cecd) - Minor changes to flagging for 5.0. Thanks @abidlabs! 
+- [#9254](https://github.com/gradio-app/gradio/pull/9254) [`03f3735`](https://github.com/gradio-app/gradio/commit/03f3735fba1fd4f1978b5431af9e67de3b6e7945) - Adds a "huggingface" button variant, and makes it the default for `gr.LoginButton` and `gr.DuplicateButton`. Thanks @abidlabs! +- [#9187](https://github.com/gradio-app/gradio/pull/9187) [`5bf00b7`](https://github.com/gradio-app/gradio/commit/5bf00b7524ebf399b48719120a49d15bb21bd65c) - make all component SSR compatible. Thanks @pngwn! +- [#9236](https://github.com/gradio-app/gradio/pull/9236) [`dd8e2e3`](https://github.com/gradio-app/gradio/commit/dd8e2e32c6c1ec42e13c55af870d0da291117dd3) - Improve button consistency across light/dark mode. Thanks @hannahblair! +- [#9225](https://github.com/gradio-app/gradio/pull/9225) [`5f2e047`](https://github.com/gradio-app/gradio/commit/5f2e047c2ce114cebc95d5dba16c4df10fa73eb1) - Add a 'None' option to the gradio.Image component to disable image_m…. Thanks @GeeMoose! +- [#9204](https://github.com/gradio-app/gradio/pull/9204) [`3c73f00`](https://github.com/gradio-app/gradio/commit/3c73f00e3016b16917ebfe0bad390f2dff683457) - 🔡 Update default core Gradio font. Thanks @hannahblair! +- [#9245](https://github.com/gradio-app/gradio/pull/9245) [`c8cfe93`](https://github.com/gradio-app/gradio/commit/c8cfe93c0971d904c29da60410952fd20c9439c0) - Lighten secondary button grey fill. Thanks @hannahblair! +- [#9246](https://github.com/gradio-app/gradio/pull/9246) [`38cf712`](https://github.com/gradio-app/gradio/commit/38cf71234bf57fe9da6eea2d32b1d6e7ef35c700) - Stop using `multiprocessing` in `flagging.CSVLogger` on Lite v5. Thanks @whitphx! +- [#9216](https://github.com/gradio-app/gradio/pull/9216) [`e137b30`](https://github.com/gradio-app/gradio/commit/e137b30b1a53ca32d3cd809d31e97d5d54a4e479) - Decrease component radii and remove input shadows. Thanks @hannahblair! 
+- [#9200](https://github.com/gradio-app/gradio/pull/9200) [`2e179d3`](https://github.com/gradio-app/gradio/commit/2e179d35be6ed60a5a6bfc7303178d63e41781ad) - prefix api routes. Thanks @pngwn! + +## 5.0.0-beta.0 + +### Features + +- [#9069](https://github.com/gradio-app/gradio/pull/9069) [`f9f84bf`](https://github.com/gradio-app/gradio/commit/f9f84bfe7064634164501d1023591b415ad2a03b) - No token passed by default in `gr.load()`. Thanks @abidlabs! +- [#9160](https://github.com/gradio-app/gradio/pull/9160) [`8f5a895`](https://github.com/gradio-app/gradio/commit/8f5a8950c949996f7c439b11a7aa40edda3e8562) - Fix native plot lite demos. Thanks @aliabd! +- [#9197](https://github.com/gradio-app/gradio/pull/9197) [`6773c4d`](https://github.com/gradio-app/gradio/commit/6773c4da22f957a11b2a07f032ce13c7b4c94f8c) - Redesign `gr.Slider()`. Thanks @hannahblair! +- [#9140](https://github.com/gradio-app/gradio/pull/9140) [`c054ec8`](https://github.com/gradio-app/gradio/commit/c054ec85e49ab102b15afd305583ee394151d16c) - Drop python 3.8 and 3.9. Thanks @abidlabs! +- [#8978](https://github.com/gradio-app/gradio/pull/8978) [`fe9d1cb`](https://github.com/gradio-app/gradio/commit/fe9d1cb0870a5b07d51e8cb05401af47efbacd13) - Improve url downloads for file objects. Thanks @aliabid94! +- [#8810](https://github.com/gradio-app/gradio/pull/8810) [`4cf8af9`](https://github.com/gradio-app/gradio/commit/4cf8af9407a44ee914e0be567da38b29f00eff8e) - Prevent invalid values from being submitted to dropdown, etc. Thanks @abidlabs! +- [#9194](https://github.com/gradio-app/gradio/pull/9194) [`20c0836`](https://github.com/gradio-app/gradio/commit/20c0836ed0e0698dbc81d2a4bda04363fd857334) - Deprecate type='tuples for chatbot and focus chatbot docs on 'messages' type. Thanks @freddyaboulton! +- [#9122](https://github.com/gradio-app/gradio/pull/9122) [`2672ea2`](https://github.com/gradio-app/gradio/commit/2672ea297ef28414ecf2eeab7984b1e4b4ee40b8) - Postprocess hardening. Thanks @freddyaboulton! 
+- [#9149](https://github.com/gradio-app/gradio/pull/9149) [`3d7a9b8`](https://github.com/gradio-app/gradio/commit/3d7a9b81f6fef06187eca832471dc1692eb493a0) - Open audio/image input stream only when queue is ready. Thanks @freddyaboulton! +- [#9173](https://github.com/gradio-app/gradio/pull/9173) [`66349fe`](https://github.com/gradio-app/gradio/commit/66349fe26827e3a3c15b738a1177e95fec7f5554) - Streaming Guides. Thanks @freddyaboulton! +- [#9185](https://github.com/gradio-app/gradio/pull/9185) [`2daf3d1`](https://github.com/gradio-app/gradio/commit/2daf3d10f5986675f6ceb75ebb50c9d991c282bf) - Adding `maxlength` attribute handling of `textarea` and `input` HTML element for the `gr.TextBox()` component via a `max_length` parameter. Thanks @WH-Yoshi! +- [#8959](https://github.com/gradio-app/gradio/pull/8959) [`a0aac66`](https://github.com/gradio-app/gradio/commit/a0aac6694076529ff925ccd34b3503d35e86cb49) - Adds `strict_cors` parameter to `launch()`. Thanks @abidlabs! +- [#9052](https://github.com/gradio-app/gradio/pull/9052) [`f3652eb`](https://github.com/gradio-app/gradio/commit/f3652ebe08211e12739df73c15fd97e5ff81276a) - Video gallery. Thanks @dawoodkhan82! +- [#9213](https://github.com/gradio-app/gradio/pull/9213) [`ab4580b`](https://github.com/gradio-app/gradio/commit/ab4580bd5f755a07c9a9bd2a775220a9a2085f8c) - Remove grey background behind all components. Thanks @hannahblair! +- [#9073](https://github.com/gradio-app/gradio/pull/9073) [`0d8a358`](https://github.com/gradio-app/gradio/commit/0d8a358cc86331aa0c83380326b30d04597f9ef9) - Set default `format` in `gr.Audio` to be `None` to avoid unnecessary preprocessing. Thanks @abidlabs! +- [#9130](https://github.com/gradio-app/gradio/pull/9130) [`864cd0f`](https://github.com/gradio-app/gradio/commit/864cd0fd6aa85691b53bd0bf3a50af05b778813c) - Raise WasmUnsupportedError for ffmpeg usage on Lite. Thanks @whitphx! 
+- [#8797](https://github.com/gradio-app/gradio/pull/8797) [`6e6818c`](https://github.com/gradio-app/gradio/commit/6e6818c3af836051fffdd070a9e33889b246186e) - Deprecate for 5.0. Thanks @abidlabs! +- [#9132](https://github.com/gradio-app/gradio/pull/9132) [`5cedf16`](https://github.com/gradio-app/gradio/commit/5cedf162f2120e30dd58bf3a8eab27115030b4f5) - Deprecate passing a tuple for gr.Code value. Thanks @freddyaboulton! +- [#8941](https://github.com/gradio-app/gradio/pull/8941) [`97a7bf6`](https://github.com/gradio-app/gradio/commit/97a7bf66a79179d1b91a3199d68e5c11216ca500) - Streaming inputs for 5.0. Thanks @freddyaboulton! +- [#9150](https://github.com/gradio-app/gradio/pull/9150) [`80c966a`](https://github.com/gradio-app/gradio/commit/80c966af6e3d947abe96058de9b683ecf05d9803) - DNS resolver on ip check. Thanks @aliabid94! +- [#9175](https://github.com/gradio-app/gradio/pull/9175) [`e6d456a`](https://github.com/gradio-app/gradio/commit/e6d456a9c3b7e80e0c9a16cd365288deff706635) - Change dark mode color theme from `gray` to `zinc`. Thanks @hannahblair! +- [#8884](https://github.com/gradio-app/gradio/pull/8884) [`3408dba`](https://github.com/gradio-app/gradio/commit/3408dba7560a17371be679d0f01564a5606dc90b) - replace ip addresses with machine-specific hashes. Thanks @abidlabs! + +### Fixes + +- [#9189](https://github.com/gradio-app/gradio/pull/9189) [`ab142ee`](https://github.com/gradio-app/gradio/commit/ab142ee13d19070b75b5eb03efcda7193b8993c2) - Fix serialization error in curl api. Thanks @freddyaboulton! 
+ ## 4.44.1 ### Features diff --git a/gradio/__init__.py b/gradio/__init__.py index 008c63bd4cfb5..5538d75cd36b3 100644 --- a/gradio/__init__.py +++ b/gradio/__init__.py @@ -42,7 +42,6 @@ Label, LinePlot, LoginButton, - LogoutButton, Markdown, MessageDict, Model3D, @@ -69,7 +68,9 @@ EventData, KeyUpData, LikeData, + RetryData, SelectData, + UndoData, on, ) from gradio.exceptions import Error @@ -77,14 +78,12 @@ from gradio.flagging import ( CSVLogger, FlaggingCallback, - HuggingFaceDatasetSaver, SimpleCSVLogger, ) from gradio.helpers import ( Info, Progress, Warning, - make_waveform, skip, update, ) diff --git a/gradio/_simple_templates/simpledropdown.py b/gradio/_simple_templates/simpledropdown.py index c1d8ad14239e8..f653fda7e6e4b 100644 --- a/gradio/_simple_templates/simpledropdown.py +++ b/gradio/_simple_templates/simpledropdown.py @@ -1,7 +1,8 @@ from __future__ import annotations import warnings -from typing import TYPE_CHECKING, Any, Callable, Sequence +from collections.abc import Callable, Sequence +from typing import TYPE_CHECKING, Any from gradio.components.base import Component, FormComponent from gradio.events import Events @@ -40,8 +41,8 @@ def __init__( Parameters: choices: A list of string options to choose from. An option can also be a tuple of the form (name, value), where name is the displayed name of the dropdown choice and value is the value to be passed to the function, or returned by the function. value: default value selected in dropdown. If None, no value is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - info: additional component description. + label: the label for this component, displayed above the component if `show_label` is `True` and is also used as the header if there are a table of examples for this component. 
If None and used in a `gr.Interface`, the label will be the name of the parameter this component corresponds to. + info: additional component description, appears below the label in smaller font. Supports markdown / HTML syntax. every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer. inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change. inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change. diff --git a/gradio/_simple_templates/simpleimage.py b/gradio/_simple_templates/simpleimage.py index 712b843f609d0..337294e6505b3 100644 --- a/gradio/_simple_templates/simpleimage.py +++ b/gradio/_simple_templates/simpleimage.py @@ -2,8 +2,9 @@ from __future__ import annotations +from collections.abc import Sequence from pathlib import Path -from typing import TYPE_CHECKING, Any, Sequence +from typing import TYPE_CHECKING, Any from gradio_client import handle_file from gradio_client.documentation import document @@ -52,7 +53,7 @@ def __init__( """ Parameters: value: A path or URL for the default value that SimpleImage component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to. + label: the label for this component, displayed above the component if `show_label` is `True` and is also used as the header if there are a table of examples for this component. 
If None and used in a `gr.Interface`, the label will be the name of the parameter this component corresponds to. every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer. inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change. show_label: if True, will display label. diff --git a/gradio/_simple_templates/simpletextbox.py b/gradio/_simple_templates/simpletextbox.py index 12aa16acb5522..4536eb05b4e31 100644 --- a/gradio/_simple_templates/simpletextbox.py +++ b/gradio/_simple_templates/simpletextbox.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Sequence +from collections.abc import Callable, Sequence +from typing import TYPE_CHECKING, Any from gradio.components.base import Component, FormComponent from gradio.events import Events @@ -43,7 +44,7 @@ def __init__( Parameters: value: default text to provide in textbox. If callable, the function will be called whenever the app loads to set the initial value of the component. placeholder: placeholder hint to provide behind textbox. - label: component name in interface. + label: the label for this component, displayed above the component if `show_label` is `True` and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component corresponds to. every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer. inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). 
`value` is recalculated any time the inputs change. show_label: if True, will display label. diff --git a/gradio/analytics.py b/gradio/analytics.py index 79fb9c2d54ed1..0284085d5cf60 100644 --- a/gradio/analytics.py +++ b/gradio/analytics.py @@ -16,7 +16,6 @@ import gradio from gradio import wasm_utils -from gradio.context import Context from gradio.utils import core_gradio_components, get_package_version # For testability, we import the pyfetch function into this module scope and define a fallback coroutine object to be patched in tests. @@ -67,7 +66,6 @@ def _do_analytics_request(topic: str, data: dict[str, Any]) -> None: def _do_normal_analytics_request(topic: str, data: dict[str, Any]) -> None: - data["ip_address"] = get_local_ip_address() try: _send_telemetry_in_thread( topic=topic, @@ -80,8 +78,6 @@ def _do_normal_analytics_request(topic: str, data: dict[str, Any]) -> None: async def _do_wasm_analytics_request(url: str, data: dict[str, Any]) -> None: - data["ip_address"] = await get_local_ip_address_wasm() - # We use urllib.parse.urlencode to encode the data as a form. # Ref: https://docs.python.org/3/library/urllib.request.html#urllib-examples body = urllib.parse.urlencode(data).encode("ascii") @@ -116,53 +112,6 @@ def version_check(): pass -def get_local_ip_address() -> str: - """ - Gets the public IP address or returns the string "No internet connection" if unable - to obtain it or the string "Analytics disabled" if a user has disabled analytics. - Does not make a new request if the IP address has already been obtained in the - same Python session. 
- """ - if not analytics_enabled(): - return "Analytics disabled" - - if Context.ip_address is None: - try: - ip_address = httpx.get( - "https://checkip.amazonaws.com/", timeout=3 - ).text.strip() - except (httpx.ConnectError, httpx.ReadTimeout): - ip_address = "No internet connection" - Context.ip_address = ip_address - else: - ip_address = Context.ip_address - return ip_address - - -async def get_local_ip_address_wasm() -> str: - """The Wasm-compatible version of get_local_ip_address().""" - if not analytics_enabled(): - return "Analytics disabled" - - if Context.ip_address is None: - try: - response = await asyncio.wait_for( - pyodide_pyfetch( - # The API used by the normal version (`get_local_ip_address()`), `https://checkip.amazonaws.com/``, blocks CORS requests, so here we use a different API. - "https://api.ipify.org" - ), - timeout=5, - ) - response_text: str = await response.string() # type: ignore - ip_address = response_text.strip() - except (asyncio.TimeoutError, OSError): - ip_address = "No internet connection" - Context.ip_address = ip_address - else: - ip_address = Context.ip_address - return ip_address - - def initiated_analytics(data: dict[str, Any]) -> None: if not analytics_enabled(): return @@ -232,8 +181,6 @@ def get_inputs_outputs( "is_sagemaker": blocks.is_sagemaker, "using_auth": blocks.auth is not None, "dev_mode": blocks.dev_mode, - "show_api": blocks.show_api, - "show_error": blocks.show_error, "inputs": get_inputs_outputs( blocks.mode, blocks.input_components, inputs_telemetry ), diff --git a/gradio/blocks.py b/gradio/blocks.py index eaf4b38de5771..3b7c830ecb5b9 100644 --- a/gradio/blocks.py +++ b/gradio/blocks.py @@ -15,16 +15,13 @@ import warnings import webbrowser from collections import defaultdict +from collections.abc import AsyncIterator, Callable, Coroutine, Sequence, Set from pathlib import Path from types import ModuleType from typing import ( TYPE_CHECKING, - AbstractSet, Any, - AsyncIterator, - Callable, Literal, - Sequence, 
cast, ) from urllib.parse import urlparse, urlunparse @@ -69,11 +66,14 @@ EventListenerMethod, ) from gradio.exceptions import ( + ChecksumMismatchError, DuplicateBlockError, InvalidApiNameError, InvalidComponentError, ) from gradio.helpers import create_tracker, skip, special_args +from gradio.node_server import start_node_server +from gradio.route_utils import API_PREFIX, MediaStream from gradio.state_holder import SessionState, StateHolder from gradio.themes import Default as DefaultTheme from gradio.themes import ThemeClass as Theme @@ -89,6 +89,7 @@ check_function_inputs_match, component_or_layout_class, get_cancelled_fn_indices, + get_node_path, get_package_version, get_upload_folder, ) @@ -100,8 +101,6 @@ if TYPE_CHECKING: # Only import for type checking (is False at runtime). - from fastapi.applications import FastAPI - from gradio.components.base import Component from gradio.renderable import Renderable @@ -113,6 +112,9 @@ themes.Monochrome(), themes.Soft(), themes.Glass(), + themes.Origin(), + themes.Citrus(), + themes.Ocean(), ] } @@ -148,6 +150,7 @@ def __init__( # Keep tracks of files that should not be deleted when the delete_cache parmameter is set # These files are the default value of the component and files that are used in examples self.keep_in_cache = set() + self.has_launched = False if render: self.render() @@ -261,8 +264,9 @@ def get_config(self): config = {**config, "proxy_url": self.proxy_url, "name": self.get_block_class()} if self.rendered_in is not None: config["rendered_in"] = self.rendered_in._id - if (_selectable := getattr(self, "_selectable", None)) is not None: - config["_selectable"] = _selectable + for event_attribute in ["_selectable", "_undoable", "_retryable", "likeable"]: + if (attributable := getattr(self, event_attribute, None)) is not None: + config[event_attribute] = attributable return config @classmethod @@ -296,7 +300,7 @@ async def async_move_resource_to_block_cache( url_or_file_path = str(url_or_file_path) if 
client_utils.is_http_url_like(url_or_file_path): - temp_file_path = await processing_utils.async_save_url_to_cache( + temp_file_path = await processing_utils.async_ssrf_protected_download( url_or_file_path, cache_dir=self.GRADIO_CACHE ) @@ -382,7 +386,7 @@ def serve_static_file( if client_utils.is_http_url_like(url_or_file_path): return FileData(path=url_or_file_path, url=url_or_file_path).model_dump() else: - data = {"path": url_or_file_path} + data = {"path": url_or_file_path, "meta": {"_type": "gradio.FileData"}} try: return client_utils.synchronize_async( processing_utils.async_move_files_to_cache, data, self @@ -514,6 +518,11 @@ def __init__( renderable: Renderable | None = None, rendered_in: Renderable | None = None, is_cancel_function: bool = False, + connection: Literal["stream", "sse"] = "sse", + time_limit: float | None = None, + stream_every: float = 0.5, + like_user_message: bool = False, + event_specific_args: list[str] | None = None, ): self.fn = fn self._id = _id @@ -552,6 +561,11 @@ def __init__( # We need to keep track of which events are cancel events # so that the client can call the /cancel route directly self.is_cancel_function = is_cancel_function + self.time_limit = time_limit + self.stream_every = stream_every + self.connection = connection + self.like_user_message = like_user_message + self.event_specific_args = event_specific_args self.spaces_auto_wrap() @@ -600,6 +614,11 @@ def get_config(self): "show_api": self.show_api, "zerogpu": self.zero_gpu, "rendered_in": self.rendered_in._id if self.rendered_in else None, + "connection": self.connection, + "time_limit": self.time_limit, + "stream_every": self.stream_every, + "like_user_message": self.like_user_message, + "event_specific_args": self.event_specific_args, } @@ -671,14 +690,14 @@ def set_event_trigger( Component | BlockContext | Sequence[Component | BlockContext] - | AbstractSet[Component | BlockContext] + | Set[Component | BlockContext] | None ), outputs: ( Component | BlockContext | 
Sequence[Component | BlockContext] - | AbstractSet[Component | BlockContext] + | Set[Component | BlockContext] | None ), preprocess: bool = True, @@ -701,6 +720,11 @@ def set_event_trigger( show_api: bool = True, renderable: Renderable | None = None, is_cancel_function: bool = False, + connection: Literal["stream", "sse"] = "sse", + time_limit: float | None = None, + stream_every: float = 0.5, + like_user_message: bool = False, + event_specific_args: list[str] | None = None, ) -> tuple[BlockFunction, int]: """ Adds an event to the component's dependencies. @@ -728,6 +752,9 @@ def set_event_trigger( concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit. show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False. is_cancel_function: whether this event cancels another running event. + connection: The connection format, either "sse" or "stream". + time_limit: The time limit for the function to run. Parameter only used for the `.stream()` event. + stream_every: The latency (in seconds) at which stream chunks are sent to the backend. Defaults to 0.5 seconds. Parameter only used for the `.stream()` event. 
Returns: dependency information, dependency index """ # Support for singular parameter @@ -738,7 +765,7 @@ def set_event_trigger( ) for target in targets ] - if isinstance(inputs, AbstractSet): + if isinstance(inputs, Set): inputs_as_dict = True inputs = sorted(inputs, key=lambda x: x._id) else: @@ -748,7 +775,7 @@ def set_event_trigger( elif not isinstance(inputs, Sequence): inputs = [inputs] - if isinstance(outputs, AbstractSet): + if isinstance(outputs, Set): outputs = sorted(outputs, key=lambda x: x._id) elif outputs is None: outputs = [] @@ -760,6 +787,8 @@ def set_event_trigger( if _targets[0][1] in ["change", "key_up"] and trigger_mode is None: trigger_mode = "always_last" + elif _targets[0][1] in ["stream"] and trigger_mode is None: + trigger_mode = "multiple" elif trigger_mode is None: trigger_mode = "once" elif trigger_mode not in ["once", "multiple", "always_last"]: @@ -839,6 +868,11 @@ def set_event_trigger( renderable=renderable, rendered_in=rendered_in, is_cancel_function=is_cancel_function, + connection=connection, + time_limit=time_limit, + stream_every=stream_every, + like_user_message=like_user_message, + event_specific_args=event_specific_args, ) self.fns[self.fn_id] = block_fn @@ -885,9 +919,14 @@ def get_layout(block: Block): block_config["renderable"] = renderable._id if not block.skip_api: block_config["api_info"] = block.api_info() # type: ignore - # .example_inputs() has been renamed .example_payload() but - # we use the old name for backwards compatibility with custom components - # created on Gradio 4.20.0 or earlier + if hasattr(block, "api_info_as_input"): + block_config["api_info_as_input"] = block.api_info_as_input() # type: ignore + else: + block_config["api_info_as_input"] = block.api_info() # type: ignore + if hasattr(block, "api_info_as_output"): + block_config["api_info_as_output"] = block.api_info_as_output() # type: ignore + else: + block_config["api_info_as_output"] = block.api_info() # type: ignore 
block_config["example_inputs"] = block.example_inputs() # type: ignore config["components"].append(block_config) @@ -949,8 +988,10 @@ def __init__( mode: str = "blocks", title: str = "Gradio", css: str | None = None, + css_paths: str | Path | Sequence[str | Path] | None = None, js: str | None = None, head: str | None = None, + head_paths: str | Path | Sequence[str | Path] | None = None, fill_height: bool = False, fill_width: bool = False, delete_cache: tuple[int, int] | None = None, @@ -962,9 +1003,11 @@ def __init__( analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True. mode: A human-friendly name for the kind of Blocks or Interface being created. Used internally for analytics. title: The tab title to display when this is opened in a browser window. - css: Custom css as a string or path to a css file. This css will be included in the demo webpage. - js: Custom js as a string or path to a js file. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside diff --git a/js/_website/src/lib/components/DemosLite.svelte b/js/_website/src/lib/components/DemosLite.svelte index 9d411d33388a0..652fd1c84ce17 100644 --- a/js/_website/src/lib/components/DemosLite.svelte +++ b/js/_website/src/lib/components/DemosLite.svelte @@ -1,15 +1,176 @@ + -
- +
- {#each demos as demo, i} + {#if selected_demo} + {/if}
-
+
@@ -297,30 +737,32 @@ width: 0; } - :global(div.lite-demo div.gradio-container) { + :global(#lite-demo div.gradio-container) { height: 100%; overflow-y: scroll; margin: 0 !important; } - .code-editor :global(label) { - display: none; + .code-editor :global(.cm-scroller) { + height: 100% !important; + min-height: none !important; + max-height: none !important; } - .code-editor :global(.codemirror-wrappper) { - border-radius: var(--block-radius); + #lite-demo { + overflow: scroll; } - .code-editor :global(> .block) { + #lite-demo :global(.embed-container) { border: none !important; } - .code-editor :global(.cm-scroller) { - height: 100% !important; + :global(div.editor-tabitem) { + padding: 0; + height: 100%; } - - .lite-demo :global(.embed-container) { - border: none !important; + :global(div.editor-tabitem > div) { + height: 100%; } .fullscreen { @@ -340,4 +782,123 @@ width: 100% !important; } } + + .search-bar { + @apply font-sans z-10 px-4 relative flex flex-none items-center border-b text-gray-500; + border-color: #e5e7eb; + } + + .search-bar input { + @apply appearance-none h-14 text-black mx-1 flex-auto min-w-0 border-none cursor-text; + outline: none; + box-shadow: none; + font-size: 1rem; + } + + .loader { + border: 1px solid #fcc089; + border-top: 2px solid #ff7c00; + border-radius: 50%; + width: 15px; + height: 15px; + animation: spin 1.2s linear infinite; + } + + @keyframes spin { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(360deg); + } + } + + .grayed { + color: #6b7280 !important; + } + + .clear { + display: flex; + align-items: center; + color: #999b9e; + font-size: 11px; + } + + .button { + display: flex; + align-items: center; + font-weight: 600; + padding-left: 0.3rem; + padding-right: 0.3rem; + border-radius: 0.375rem; + float: right; + margin: 0.25rem; + border: 1px solid #e5e7eb; + background: linear-gradient(to bottom right, #f3f4f6, #e5e7eb); + color: #374151; + cursor: pointer; + font-family: sans-serif; + } + + 
.share-button { + display: flex; + align-items: center; + font-weight: 500; + padding-left: 0.5rem; + padding-right: 0.5rem; + padding-top: 0.1rem; + padding-bottom: 0.1rem; + border-radius: 0.375rem; + float: right; + margin: 0.25rem; + border: 1px solid #e5e7eb; + background: linear-gradient(to bottom right, #f9fafb, #e5e7eb); + color: #374151; + cursor: pointer; + font-family: sans-serif; + font-size: 14px; + } + .share-button:hover { + background: linear-gradient(to bottom right, #f9fafb, #d7dadf); + } + + :global(.highlight) { + background: #e1f7e161; + } + + .share-btns { + top: -6%; + right: 0.4%; + } + + @media (min-height: 800px) { + .share-btns { + top: -5%; + } + } + + .code-scroll { + overflow: auto; + } + + /* For Webkit browsers (Chrome, Safari, etc.) */ + .code-scroll::-webkit-scrollbar { + width: 10px; /* width of the entire scrollbar */ + } + + .code-scroll::-webkit-scrollbar-track { + background: transparent; /* color of the tracking area */ + } + + .code-scroll::-webkit-scrollbar-thumb { + background-color: #888; /* color of the scroll thumb */ + border-radius: 20px; /* roundness of the scroll thumb */ + border: 3px solid white; /* creates padding around scroll thumb */ + } + + /* For Firefox */ + .code-scroll { + scrollbar-width: thin; + scrollbar-color: #888 transparent; + } diff --git a/js/_website/src/lib/components/VersionDropdown.svelte b/js/_website/src/lib/components/VersionDropdown.svelte index da46773ef60b8..1a7a96ba9bc1f 100644 --- a/js/_website/src/lib/components/VersionDropdown.svelte +++ b/js/_website/src/lib/components/VersionDropdown.svelte @@ -57,8 +57,8 @@ diff --git a/js/_website/src/lib/templates/gradio/01_building-demos/02_chatinterface.svx b/js/_website/src/lib/templates/gradio/01_building-demos/02_chatinterface.svx index 36f97abc9b1ed..318d6c78fd060 100644 --- a/js/_website/src/lib/templates/gradio/01_building-demos/02_chatinterface.svx +++ b/js/_website/src/lib/templates/gradio/01_building-demos/02_chatinterface.svx @@ 
-17,7 +17,7 @@ ```python -gradio.ChatInterface(fn, ···) +gradio.ChatInterface(fn, type="messages", ···) ``` @@ -36,7 +36,7 @@ import gradio as gr def echo(message, history): return message -demo = gr.ChatInterface(fn=echo, examples=["hello", "hola", "merhaba"], title="Echo Bot") +demo = gr.ChatInterface(fn=echo, type="messages", examples=["hello", "hola", "merhaba"], title="Echo Bot") demo.launch() ``` @@ -57,7 +57,7 @@ def vote(data: gr.LikeData): with gr.Blocks() as demo: chatbot = gr.Chatbot(placeholder="Your Personal Yes-Man
Ask Me Anything") chatbot.like(vote, None, None) - gr.ChatInterface(fn=yes, chatbot=chatbot) + gr.ChatInterface(fn=yes, type="messages", chatbot=chatbot) demo.launch() ``` diff --git a/js/_website/src/lib/templates/gradio/03_components/chatbot.svx b/js/_website/src/lib/templates/gradio/03_components/chatbot.svx index ce1d04308f13f..3cd1587b0e88c 100644 --- a/js/_website/src/lib/templates/gradio/03_components/chatbot.svx +++ b/js/_website/src/lib/templates/gradio/03_components/chatbot.svx @@ -10,6 +10,13 @@ import { style_formatted_text } from "$lib/text"; let obj = get_object("chatbot"); + + let embedded_demo_obj = `[ + {"role": "user", "content": "Hello World"}, + {"role": "assistant", "content": "Hey Gradio!"}, + {"role": "user", "content": "❤️"}, + {"role": "assistant", "content": "😍"} + ]` @@ -17,7 +24,7 @@ ```python -gradio.Chatbot(···) +gradio.Chatbot(type="messages", ···) ``` @@ -25,7 +32,8 @@ gradio.Chatbot(···) import gradio as gr with gr.Blocks() as demo: - gr.Chatbot(value=[["Hello World","Hey Gradio!"],["❤️","😍"],["🔥","🤗"]]) + gr.Chatbot(value={embedded_demo_obj}, + type="messages") demo.launch()
@@ -38,145 +46,105 @@ demo.launch() ### Behavior The data format accepted by the Chatbot is dictated by the `type` parameter. -This parameter can take two values, `'tuples'` and `'messages'`. - - -If `type` is `'tuples'`, then the data sent to/from the chatbot will be a list of tuples. -The first element of each tuple is the user message and the second element is the bot's response. -Each element can be a string (markdown/html is supported), -a tuple (in which case the first element is a filepath that will be displayed in the chatbot), -or a gradio component (see the Examples section for more details). +This parameter can take two values, `'tuples'` and `'messages'`. +The `'tuples'` type is deprecated and will be removed in a future version of Gradio. +### Message format If the `type` is `'messages'`, then the data sent to/from the chatbot will be a list of dictionaries with `role` and `content` keys. This format is compliant with the format expected by most LLM APIs (HuggingChat, OpenAI, Claude). -The `role` key is either `'user'` or `'`assistant'` and the `content` key can be a string (markdown/html supported), -a `FileDataDict` (to represent a file that is displayed in the chatbot - documented below), or a gradio component. +The `role` key is either `'user'` or `'assistant'` and the `content` key can be one of the following: +1. A string (markdown/html is also supported). +2. A dictionary with `path` and `alt_text` keys. In this case, the file at `path` will be displayed in the chat history. Image, audio, and video files are fully embedded and visualized in the chat bubble. +The `path` key can point to a valid publicly available URL. The `alt_text` key is optional but it's good practice to provide [alt text](https://en.wikipedia.org/wiki/Alt_attribute). +3. An instance of another Gradio component. -For convenience, you can use the `ChatMessage` dataclass so that your text editor can give you autocomplete hints and typechecks. +
+We will show examples for all three cases below - ```python -from gradio import ChatMessage - def generate_response(history): + # A plain text response history.append( - ChatMessage(role="assistant", - content="How can I help you?") - ) + {"role": "assistant", content="I am happy to provide you that report and plot."} + ) + # Embed the quaterly sales report in the chat + history.append( + {"role": "assistant", content={"path": "quaterly_sales.txt", "alt_text": "Sales Report for Q2 2024"}} + ) + # Make a plot of sales data + history.append( + {"role": "assistant", content=gr.Plot(value=make_plot_from_file('quaterly_sales.txt'))} + ) return history ``` -Additionally, when `type` is `messages`, you can provide additional metadata regarding any tools used to generate the response. -This is useful for displaying the thought process of LLM agents. For example, +For convenience, you can use the `ChatMessage` dataclass so that your text editor can give you autocomplete hints and typechecks. ```python +from gradio import ChatMessage + def generate_response(history): history.append( ChatMessage(role="assistant", - content="The weather API says it is 20 degrees Celcius in New York.", - metadata={"title": "🛠️ Used tool Weather API"}) + content="How can I help you?") ) return history ``` -Would be displayed as following: - -Gradio chatbot tool display - - -All of the types expected by the messages format are documented below: - -```python -class MetadataDict(TypedDict): - title: Union[str, None] - -class FileDataDict(TypedDict): - path: str # server filepath - url: NotRequired[Optional[str]] # normalised server url - size: NotRequired[Optional[int]] # size in bytes - orig_name: NotRequired[Optional[str]] # original filename - mime_type: NotRequired[Optional[str]] - is_stream: NotRequired[bool] - meta: dict[Literal["_type"], Literal["gradio.FileData"]] - +### Tuples format -class MessageDict(TypedDict): - content: str | FileDataDict | Component - role: Literal["user", 
"assistant", "system"] - metadata: NotRequired[MetadataDict] - - -@dataclass -class Metadata: - title: Optional[str] = None - - -@dataclass -class ChatMessage: - role: Literal["user", "assistant", "system"] - content: str | FileData | Component | FileDataDict | tuple | list - metadata: MetadataDict | Metadata = field(default_factory=Metadata) -``` +If `type` is `'tuples'`, then the data sent to/from the chatbot will be a list of tuples. +The first element of each tuple is the user message and the second element is the bot's response. +Each element can be a string (markdown/html is supported), +a tuple (in which case the first element is a filepath that will be displayed in the chatbot), +or a gradio component (see the Examples section for more details). + +### Initialization + -## **As input component**: {@html style_formatted_text(obj.preprocess.return_doc.doc)} -##### Your function should accept one of these types: -If `type` is `tuples` - +{#if obj.string_shortcuts && obj.string_shortcuts.length > 0} + +### Shortcuts + +{/if} -```python -from gradio import Component +### Examples -def predict( - value: list[list[str | tuple[str, str] | Component | None]] | None -): - ... -``` +** Displaying Thoughts/Tool Usage ** -If `type` is `messages` - +When `type` is `messages`, you can provide additional metadata regarding any tools used to generate the response. +This is useful for displaying the thought process of LLM agents. For example, ```python -from gradio import MessageDict - -def predict(value: list[MessageDict] | None): - ... +def generate_response(history): + history.append( + ChatMessage(role="assistant", + content="The weather API says it is 20 degrees Celcius in New York.", + metadata={"title": "🛠️ Used tool Weather API"}) + ) + return history ``` -
- -## **As output component**: {@html style_formatted_text(obj.postprocess.parameter_doc[0].doc)} -##### Your function should return one of these types: - -If `type` is `tuples` - -```python -def predict(···) -> list[list[str | tuple[str] | tuple[str, str] | None] | tuple] | None - ... - return value -``` +Would be displayed as following: -If `type` is `messages` - +Gradio chatbot tool display -from gradio import ChatMessage, MessageDict +You can also specify metadata with a plain python dictionary, ```python -def predict(···) - > list[MessageDict] | list[ChatMessage]: - ... +def generate_response(history): + history.append( + dict(role="assistant", + content="The weather API says it is 20 degrees Celcius in New York.", + metadata={"title": "🛠️ Used tool Weather API"}) + ) + return history ``` - -### Initialization - - - -{#if obj.string_shortcuts && obj.string_shortcuts.length > 0} - -### Shortcuts - -{/if} - -### Examples - **Using Gradio Components Inside `gr.Chatbot`** The `Chatbot` component supports using many of the core Gradio components (such as `gr.Image`, `gr.Plot`, `gr.Audio`, and `gr.HTML`) inside of the chatbot. Simply include one of these components in your list of tuples. Here's an example: diff --git a/js/_website/src/lib/templates/gradio/03_components/logoutbutton.svx b/js/_website/src/lib/templates/gradio/03_components/logoutbutton.svx deleted file mode 100644 index 155e0cbfc21af..0000000000000 --- a/js/_website/src/lib/templates/gradio/03_components/logoutbutton.svx +++ /dev/null @@ -1,78 +0,0 @@ - - - - -# {obj.name} - - -```python -gradio.LogoutButton(···) -``` - - -### Description -## {@html style_formatted_text(obj.description)} - - -### Behavior -## **As input component**: {@html style_formatted_text(obj.preprocess.return_doc.doc)} -##### Your function should accept one of these types: - -```python -def predict( - value: str | None -) - ... -``` - -
- -## **As output component**: {@html style_formatted_text(obj.postprocess.parameter_doc[0].doc)} -##### Your function should return one of these types: - -```python -def predict(···) -> str | None - ... - return value -``` - - - -### Initialization - - - -{#if obj.string_shortcuts && obj.string_shortcuts.length > 0} - -### Shortcuts - -{/if} - -{#if obj.demos && obj.demos.length > 0} - -### Demos - -{/if} - -{#if obj.fns && obj.fns.length > 0} - -### Event Listeners - -{/if} - -{#if obj.guides && obj.guides.length > 0} - -### Guides - -{/if} diff --git a/js/_website/src/lib/templates/gradio/04_helpers/make_waveform.svx b/js/_website/src/lib/templates/gradio/04_helpers/make_waveform.svx deleted file mode 100644 index 7dd94e10fa41b..0000000000000 --- a/js/_website/src/lib/templates/gradio/04_helpers/make_waveform.svx +++ /dev/null @@ -1,57 +0,0 @@ - - - - -# {obj.name} - - -```python -gradio.make_waveform(···) -``` - - -### Description -## {@html style_formatted_text(obj.description)} - - - -{#if obj.example} -### Example Usage -```python -None -``` -{/if} - - -### Initialization - - - -{#if obj.demos && obj.demos.length > 0} - -### Demos - -{/if} - -{#if obj.fns && obj.fns.length > 0} - -### Methods - -{/if} - -{#if obj.guides && obj.guides.length > 0} - -### Guides - -{/if} diff --git a/js/_website/src/lib/templates/gradio/other/01_flagging.svx b/js/_website/src/lib/templates/gradio/other/01_flagging.svx index d7773b95105d1..e919a34976940 100644 --- a/js/_website/src/lib/templates/gradio/other/01_flagging.svx +++ b/js/_website/src/lib/templates/gradio/other/01_flagging.svx @@ -11,7 +11,6 @@ let simple_csv_logger_obj = get_object("simplecsvlogger"); let csv_logger_obj = get_object("csvlogger"); - let hf_dataset_saver_obj = get_object("huggingfacedatasetsaver"); @@ -128,58 +127,3 @@ demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", {/if} - - - - -# {hf_dataset_saver_obj.name} - - - -```python -gradio.HuggingFaceDatasetSaver(hf_token, 
dataset_name, ···) -``` - - -### Description -## {@html style_formatted_text(hf_dataset_saver_obj.description)} - - - -{#if hf_dataset_saver_obj.example} -### Example Usage -```python -import gradio as gr -hf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, "image-classification-mistakes") -def image_classifier(inp): - return {'cat': 0.3, 'dog': 0.7} -demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", - allow_flagging="manual", flagging_callback=hf_writer) -``` -{/if} - -{#if (hf_dataset_saver_obj.parameters.length > 0 && hf_dataset_saver_obj.parameters[0].name != "self") || hf_dataset_saver_obj.parameters.length > 1} - -### Initialization - -{/if} - -{#if hf_dataset_saver_obj.demos && hf_dataset_saver_obj.demos.length > 0} - -### Demos - -{/if} - -{#if hf_dataset_saver_obj.fns && hf_dataset_saver_obj.fns.length > 0} - -### Methods - -{/if} - -{#if hf_dataset_saver_obj.guides && hf_dataset_saver_obj.guides.length > 0} - -### Guides - -{/if} - - diff --git a/js/_website/src/lib/templates/third-party-clients/third-party-clients/01_introduction.svx b/js/_website/src/lib/templates/third-party-clients/third-party-clients/01_introduction.svx index 396af07a87ccd..73a0c96b19d63 100644 --- a/js/_website/src/lib/templates/third-party-clients/third-party-clients/01_introduction.svx +++ b/js/_website/src/lib/templates/third-party-clients/third-party-clients/01_introduction.svx @@ -14,4 +14,5 @@ ### Community Clients ## We also encourage the development and use of third party clients built by the community: -- [Rust Client](/docs/third-party-clients/rust-client): `gradio-rs` built by [@JacobLinCool](https://github.com/JacobLinCool) allows you to make requests in Rust. \ No newline at end of file +- [Rust Client](/docs/third-party-clients/rust-client): `gradio-rs` built by [@JacobLinCool](https://github.com/JacobLinCool) allows you to make requests in Rust. 
+- [Powershell Client](https://github.com/rrg92/powershai): `powershai` built by [@rrg92](https://github.com/rrg92) allows you to make requests to Gradio apps directly from Powershell. See [here for documentation](https://github.com/rrg92/powershai/blob/main/docs/en-US/providers/HUGGING-FACE.md) \ No newline at end of file diff --git a/js/_website/src/routes/+page.svelte b/js/_website/src/routes/+page.svelte index 408521931a912..526034365205b 100644 --- a/js/_website/src/routes/+page.svelte +++ b/js/_website/src/routes/+page.svelte @@ -30,7 +30,10 @@ diff --git a/js/_website/src/routes/[[version]]/docs/gradio/[doc]/+page.svelte b/js/_website/src/routes/[[version]]/docs/gradio/[doc]/+page.svelte index f7fbeba6d1682..116112c7f8c0f 100644 --- a/js/_website/src/routes/[[version]]/docs/gradio/[doc]/+page.svelte +++ b/js/_website/src/routes/[[version]]/docs/gradio/[doc]/+page.svelte @@ -117,8 +117,6 @@ canonical={$page.url.pathname} description={"Gradio docs for using " + all_headers.page_title.title} /> - -
@@ -257,7 +255,7 @@ {all_headers.page_title.title} {#if all_headers.headers && all_headers.headers.length > 0} -
- handle_like(i, messages[0], selected)} - {likeable} - {show_copy_button} - message={msg_format === "tuples" ? messages[0] : messages} - position={role === "user" ? "right" : "left"} - avatar={avatar_img} + handle_like(i, messages[0], selected)} + {scroll} /> {/each} {#if pending_message} {/if} - {:else if placeholder !== null} -
- -
- {/if} -
+
+ {:else} +
+ {#if placeholder !== null} +
+ +
+ {/if} + {#if examples !== null} +
+ {#each examples as example, i} + + {/each} +
+ {/if} +
+ {/if}
+{#if show_scroll_button} +
+ +
+{/if} + diff --git a/js/chatbot/shared/Copy.svelte b/js/chatbot/shared/Copy.svelte index 3f21b209d588a..47e15c16b0297 100644 --- a/js/chatbot/shared/Copy.svelte +++ b/js/chatbot/shared/Copy.svelte @@ -1,6 +1,7 @@ - - - + label={copied ? "Copied message" : "Copy message"} + Icon={copied ? Check : Copy} +/> diff --git a/js/chatbot/shared/CopyAll.svelte b/js/chatbot/shared/CopyAll.svelte index fcca6f03d375e..aa05eecb0561d 100644 --- a/js/chatbot/shared/CopyAll.svelte +++ b/js/chatbot/shared/CopyAll.svelte @@ -2,6 +2,7 @@ import { onDestroy } from "svelte"; import { Copy, Check } from "@gradio/icons"; import type { NormalisedMessage } from "../types"; + import { IconButton } from "@gradio/atoms"; let copied = false; export let value: NormalisedMessage[] | null; @@ -45,40 +46,8 @@ }); - - - + label={copied ? "Copied conversation" : "Copy conversation"} +> diff --git a/js/chatbot/shared/LikeDislike.svelte b/js/chatbot/shared/LikeDislike.svelte index 95f34ea2a9a93..71fbdab2ae150 100644 --- a/js/chatbot/shared/LikeDislike.svelte +++ b/js/chatbot/shared/LikeDislike.svelte @@ -1,67 +1,35 @@ - +/> - - - +/> diff --git a/js/chatbot/shared/Message.svelte b/js/chatbot/shared/Message.svelte new file mode 100644 index 0000000000000..62c80457d455f --- /dev/null +++ b/js/chatbot/shared/Message.svelte @@ -0,0 +1,592 @@ + + +
+ {#if avatar_img !== null} +
+ {role} avatar +
+ {/if} +
+ {#each messages as message, thought_index} +
0} + > + +
+ + {#if layout === "panel"} + + {/if} + {/each} +
+
+ +{#if layout === "bubble"} + +{/if} + + diff --git a/js/chatbot/shared/MessageBox.svelte b/js/chatbot/shared/MessageBox.svelte index 60da1464ea150..5affb8369c008 100644 --- a/js/chatbot/shared/MessageBox.svelte +++ b/js/chatbot/shared/MessageBox.svelte @@ -1,5 +1,5 @@ gradio.dispatch("clear_status", loading_status)} /> - + {#if show_label} + + {/if} {#if !value && !interactive} @@ -85,7 +88,9 @@ bind:value {language} {lines} + {max_lines} {dark_mode} + {wrap_lines} readonly={!interactive} on:blur={() => gradio.dispatch("blur")} on:focus={() => gradio.dispatch("focus")} diff --git a/js/code/package.json b/js/code/package.json index 25d2c22185521..11c3045644c34 100644 --- a/js/code/package.json +++ b/js/code/package.json @@ -1,6 +1,6 @@ { "name": "@gradio/code", - "version": "0.9.1", + "version": "0.10.0-beta.8", "description": "Gradio UI packages", "type": "module", "author": "", diff --git a/js/code/shared/Code.svelte b/js/code/shared/Code.svelte index b5da7900bac7a..9f0c27efacbf9 100644 --- a/js/code/shared/Code.svelte +++ b/js/code/shared/Code.svelte @@ -20,10 +20,12 @@ export let basic = true; export let language: string; export let lines = 5; + export let max_lines: number | null = null; export let extensions: Extension[] = []; export let use_tab = true; export let readonly = false; export let placeholder: string | HTMLElement | null | undefined = undefined; + export let wrap_lines = false; const dispatch = createEventDispatcher<{ change: string; @@ -59,7 +61,7 @@ function update_lines(): void { if (view) { - view.requestMeasure({ read: update_gutters }); + view.requestMeasure({ read: resize }); } } @@ -96,18 +98,20 @@ return null; } - function update_gutters(_view: EditorView): any { - let gutters = _view.dom.querySelectorAll(".cm-gutter"); - let _lines = lines + 1; - let lineHeight = getGutterLineHeight(_view); - if (!lineHeight) { + function resize(_view: EditorView): any { + let scroller = _view.dom.querySelector(".cm-scroller"); + if (!scroller) { return 
null; } - for (var i = 0; i < gutters.length; i++) { - let node = gutters[i]; - node.style.minHeight = `calc(${lineHeight} * ${_lines})`; + const lineHeight = getGutterLineHeight(_view); + if (!lineHeight) { + return null; } - return null; + + const minLines = lines == 1 ? 1 : lines + 1; + scroller.style.minHeight = `calc(${lineHeight} * ${minLines})`; + if (max_lines) + scroller.style.maxHeight = `calc(${lineHeight} * ${max_lines + 1})`; } function handle_change(vu: ViewUpdate): void { @@ -117,7 +121,7 @@ value = text; dispatch("change", text); } - view.requestMeasure({ read: update_gutters }); + view.requestMeasure({ read: resize }); } function get_extensions(): Extension[] { @@ -148,6 +152,9 @@ fontFamily: "var(--font-mono)", minHeight: "100%" }, + ".cm-gutterElement": { + marginRight: "var(--spacing-xs)" + }, ".cm-gutters": { marginRight: "1px", borderRight: "1px solid var(--border-color-primary)", @@ -199,6 +206,9 @@ } extensions.push(EditorView.updateListener.of(handle_change)); + if (wrap_lines) { + extensions.push(EditorView.lineWrapping); + } return extensions; } @@ -233,13 +243,13 @@ .wrap { display: flex; flex-direction: column; - flex-flow: column; + flex-grow: 1; margin: 0; padding: 0; height: 100%; } .codemirror-wrapper { - height: 100%; + flex-grow: 1; overflow: auto; } diff --git a/js/code/shared/Copy.svelte b/js/code/shared/Copy.svelte index e8909fc1c55f9..c1979aa266216 100644 --- a/js/code/shared/Copy.svelte +++ b/js/code/shared/Copy.svelte @@ -1,7 +1,7 @@ - - - + diff --git a/js/code/shared/Download.svelte b/js/code/shared/Download.svelte index aefdd499e57bb..f103f271ecad7 100644 --- a/js/code/shared/Download.svelte +++ b/js/code/shared/Download.svelte @@ -1,8 +1,8 @@ -
- - - {#if copied} - - {/if} - -
- - + + + diff --git a/js/code/shared/Widgets.svelte b/js/code/shared/Widgets.svelte index 584c1944077ad..014676455e75e 100644 --- a/js/code/shared/Widgets.svelte +++ b/js/code/shared/Widgets.svelte @@ -1,35 +1,13 @@ -
+ -
- - + diff --git a/js/code/shared/language.ts b/js/code/shared/language.ts index a4492bfd815c7..499bc2456e198 100644 --- a/js/code/shared/language.ts +++ b/js/code/shared/language.ts @@ -12,6 +12,7 @@ const possible_langs = [ "html", "css", "javascript", + "jinja2", "typescript", "yaml", "dockerfile", @@ -58,6 +59,10 @@ const lang_map: Record Promise) | undefined> = { css: () => import("@codemirror/lang-css").then((m) => m.css()), javascript: () => import("@codemirror/lang-javascript").then((m) => m.javascript()), + jinja2: () => + import("@codemirror/legacy-modes/mode/jinja2").then((m) => + StreamLanguage.define(m.jinja2) + ), typescript: () => import("@codemirror/lang-javascript").then((m) => m.javascript({ typescript: true }) diff --git a/js/colorpicker/CHANGELOG.md b/js/colorpicker/CHANGELOG.md index 46af56ff3745b..eff82c5ac6621 100644 --- a/js/colorpicker/CHANGELOG.md +++ b/js/colorpicker/CHANGELOG.md @@ -1,5 +1,54 @@ # @gradio/colorpicker +## 0.4.0-beta.5 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.5 +- @gradio/icons@0.8.0-beta.4 +- @gradio/atoms@0.9.0-beta.5 + +## 0.4.0-beta.4 + +### Features + +- [#9570](https://github.com/gradio-app/gradio/pull/9570) [`e0ee3d5`](https://github.com/gradio-app/gradio/commit/e0ee3d5bb1020744cefa1abf66009fbf07da8cbe) - Update gr.ColorPicker UI. Thanks @hannahblair! +- [#9521](https://github.com/gradio-app/gradio/pull/9521) [`06ef22e`](https://github.com/gradio-app/gradio/commit/06ef22e83cdd27e7afb381396d153d9db3dea16e) - Allow `info=` to render markdown. Thanks @dawoodkhan82! 
+ +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.4 +- @gradio/atoms@0.9.0-beta.4 + +## 0.3.13-beta.3 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.3 +- @gradio/atoms@0.9.0-beta.3 + +## 0.3.13-beta.2 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.2 + +## 0.3.13-beta.2 + +### Dependency updates + +- @gradio/atoms@0.9.0-beta.2 +- @gradio/statustracker@0.8.0-beta.2 +- @gradio/utils@0.7.0-beta.2 + +## 0.3.13-beta.1 + +### Dependency updates + +- @gradio/atoms@0.8.1-beta.1 +- @gradio/statustracker@0.8.0-beta.1 +- @gradio/utils@0.7.0-beta.1 + ## 0.3.13 ### Fixes diff --git a/js/colorpicker/ColorPicker.stories.svelte b/js/colorpicker/ColorPicker.stories.svelte index c54482bb62ba5..d15f47fb659df 100644 --- a/js/colorpicker/ColorPicker.stories.svelte +++ b/js/colorpicker/ColorPicker.stories.svelte @@ -6,10 +6,10 @@ - + ; export let interactive: boolean; + export let disabled = false; @@ -47,10 +48,11 @@ gradio.dispatch("change")} on:input={() => gradio.dispatch("input")} on:submit={() => gradio.dispatch("submit")} diff --git a/js/colorpicker/package.json b/js/colorpicker/package.json index e00cf2513ce27..3c8bb90447fc9 100644 --- a/js/colorpicker/package.json +++ b/js/colorpicker/package.json @@ -1,6 +1,6 @@ { "name": "@gradio/colorpicker", - "version": "0.3.13", + "version": "0.4.0-beta.5", "description": "Gradio UI packages", "type": "module", "author": "", @@ -24,7 +24,10 @@ "dependencies": { "@gradio/atoms": "workspace:^", "@gradio/statustracker": "workspace:^", - "@gradio/utils": "workspace:^" + "@gradio/utils": "workspace:^", + "@gradio/icons": "workspace:^", + "tinycolor2": "^1.6.0", + "@types/tinycolor2": "^1.4.6" }, "devDependencies": { "@gradio/preview": "workspace:^" diff --git a/js/colorpicker/shared/Colorpicker.svelte b/js/colorpicker/shared/Colorpicker.svelte index 7153989cf607c..5438201d9a522 100644 --- a/js/colorpicker/shared/Colorpicker.svelte +++ b/js/colorpicker/shared/Colorpicker.svelte @@ -1,6 +1,10 @@ - 
+{label} + +
+
+ (value = e.currentTarget.value)} + /> + +
+ +
+ {#each modes as [label, value]} + + {/each} +
+
+
+
+{/if} diff --git a/js/colorpicker/shared/events.ts b/js/colorpicker/shared/events.ts new file mode 100644 index 0000000000000..0f41b82293ef9 --- /dev/null +++ b/js/colorpicker/shared/events.ts @@ -0,0 +1,28 @@ +/** + * Svelte action to handle clicks outside of a DOM node + * @param node DOM node to check the click is outside of + * @param callback callback function to call if click is outside + * @returns svelte action return object with destroy method to remove event listener + */ +export function click_outside( + node: Node, + callback: (arg: MouseEvent) => void +): any { + const handle_click = (event: MouseEvent): void => { + if ( + node && + !node.contains(event.target as Node) && + !event.defaultPrevented + ) { + callback(event); + } + }; + + document.addEventListener("mousedown", handle_click, true); + + return { + destroy() { + document.removeEventListener("mousedown", handle_click, true); + } + }; +} diff --git a/js/colorpicker/shared/utils.ts b/js/colorpicker/shared/utils.ts new file mode 100644 index 0000000000000..d918dce692cc5 --- /dev/null +++ b/js/colorpicker/shared/utils.ts @@ -0,0 +1,37 @@ +import tinycolor from "tinycolor2"; + +export function hsva_to_rgba(hsva: { + h: number; + s: number; + v: number; + a: number; +}): string { + const saturation = hsva.s; + const value = hsva.v; + let chroma = saturation * value; + const hue_by_60 = hsva.h / 60; + let x = chroma * (1 - Math.abs((hue_by_60 % 2) - 1)); + const m = value - chroma; + + chroma = chroma + m; + x = x + m; + + const index = Math.floor(hue_by_60) % 6; + const red = [chroma, x, m, m, x, chroma][index]; + const green = [x, chroma, chroma, x, m, m][index]; + const blue = [m, m, x, chroma, chroma, x][index]; + + return `rgba(${red * 255}, ${green * 255}, ${blue * 255}, ${hsva.a})`; +} + +export function format_color( + color: string, + mode: "hex" | "rgb" | "hsl" +): string { + if (mode === "hex") { + return tinycolor(color).toHexString(); + } else if (mode === "rgb") { + return 
tinycolor(color).toRgbString(); + } + return tinycolor(color).toHslString(); +} diff --git a/js/column/CHANGELOG.md b/js/column/CHANGELOG.md index b0a3e55f0c2af..c9676dd04a4c8 100644 --- a/js/column/CHANGELOG.md +++ b/js/column/CHANGELOG.md @@ -1,5 +1,57 @@ # @gradio/column +## 0.2.0-beta.2 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.5 + +## 0.2.0-beta.2 + +### Features + +- [#9577](https://github.com/gradio-app/gradio/pull/9577) [`9f532e0`](https://github.com/gradio-app/gradio/commit/9f532e03a6b91b9a5592152c19b9b2611774cae7) - Equal height columns. Thanks @aliabid94! + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.4 + +## 0.2.0-beta.1 + +### Features + +- [#9496](https://github.com/gradio-app/gradio/pull/9496) [`1647ebd`](https://github.com/gradio-app/gradio/commit/1647ebddc3e2ed6fc143a62629409e32afcc5801) - UI theme fixes. Thanks @aliabid94! + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.3 + +## 0.2.0-beta.0 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.2 + +## 0.2.0-beta.0 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.2 +- @gradio/utils@0.7.0-beta.2 + +## 0.2.0-beta.0 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.1 +- @gradio/utils@0.7.0-beta.1 + +## 0.2.0-beta.0 + +### Features + +- [#9213](https://github.com/gradio-app/gradio/pull/9213) [`ab4580b`](https://github.com/gradio-app/gradio/commit/ab4580bd5f755a07c9a9bd2a775220a9a2085f8c) - Remove grey background behind all components. Thanks @hannahblair! + ## 0.1.4 ### Fixes diff --git a/js/column/Index.svelte b/js/column/Index.svelte index 67cc50cbaaee8..e74f08da72fc9 100644 --- a/js/column/Index.svelte +++ b/js/column/Index.svelte @@ -17,7 +17,7 @@
- import { tick } from "svelte"; + import { tick, onMount } from "svelte"; import { _ } from "svelte-i18n"; import { Client } from "@gradio/client"; @@ -12,8 +12,9 @@ import type { ThemeMode, Payload } from "./types"; import { Toast } from "@gradio/statustracker"; import type { ToastMessage } from "@gradio/statustracker"; - import type { ShareData } from "@gradio/utils"; + import type { ShareData, ValueData } from "@gradio/utils"; import MountComponents from "./MountComponents.svelte"; + import { prefix_css } from "./css"; import logo from "./images/logo.svg"; import api_logo from "./api_docs/img/api-logo.svg"; @@ -45,36 +46,47 @@ export let fill_height = false; export let ready: boolean; export let username: string | null; - - const { + export let api_prefix = ""; + export let max_file_size: number | undefined = undefined; + export let initial_layout: ComponentMeta | undefined = undefined; + export let css: string | null | undefined = null; + let { layout: _layout, targets, update_value, get_data, + modify_stream, + get_stream_state, + set_time_limit, loading_status, scheduled_updates, create_layout, rerender_layout - } = create_components(); - - $: create_layout({ - components, - layout, - dependencies, - root, - app, - options: { - fill_height - } - }); + } = create_components(initial_layout); + + $: components, layout, dependencies, root, app, fill_height, target, run(); $: { ready = !!$_layout; } - let params = new URLSearchParams(window.location.search); - let api_docs_visible = params.get("view") === "api" && show_api; - let api_recorder_visible = params.get("view") === "api-recorder" && show_api; + async function run(): Promise { + await create_layout({ + components, + layout, + dependencies, + root: root + api_prefix, + app, + options: { + fill_height + } + }); + } + + export let search_params: URLSearchParams; + let api_docs_visible = search_params.get("view") === "api" && show_api; + let api_recorder_visible = + search_params.get("view") === 
"api-recorder" && show_api; function set_api_docs_visible(visible: boolean): void { api_recorder_visible = false; api_docs_visible = visible; @@ -166,25 +178,19 @@ let _error_id = -1; let user_left_page = false; - document.addEventListener("visibilitychange", function () { - if (document.visibilityState === "hidden") { - user_left_page = true; - } - }); const MESSAGE_QUOTE_RE = /^'([^]+)'$/; const DUPLICATE_MESSAGE = $_("blocks.long_requests_queue"); const MOBILE_QUEUE_WARNING = $_("blocks.connection_can_break"); const MOBILE_RECONNECT_MESSAGE = $_("blocks.lost_connection"); + const WAITING_FOR_INPUTS_MESSAGE = $_("blocks.waiting_for_inputs"); const SHOW_DUPLICATE_MESSAGE_ON_ETA = 15; const SHOW_MOBILE_QUEUE_WARNING_ON_ETA = 10; - const is_mobile_device = - /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test( - navigator.userAgent - ); + let is_mobile_device = false; let showed_duplicate_message = false; let showed_mobile_warning = false; + let inputs_waiting: number[] = []; // as state updates are not synchronous, we need to ensure updates are flushed before triggering any requests function wait_then_trigger_api_call( @@ -199,8 +205,10 @@ if ($scheduled_updates) { _unsub = scheduled_updates.subscribe((updating) => { if (!updating) { - trigger_api_call(dep_index, trigger_id, event_data); - unsub(); + tick().then(() => { + trigger_api_call(dep_index, trigger_id, event_data); + unsub(); + }); } }); } else { @@ -208,13 +216,36 @@ } } + async function get_component_value_or_event_data( + component_id: number, + trigger_id: number | null, + event_data: unknown + ): Promise { + if ( + component_id === trigger_id && + event_data && + (event_data as ValueData).is_value_data === true + ) { + // @ts-ignore + return event_data.value; + } + return get_data(component_id); + } + async function trigger_api_call( dep_index: number, trigger_id: number | null = null, event_data: unknown = null ): Promise { let dep = dependencies.find((dep) => dep.id === 
dep_index)!; - + if (inputs_waiting.length > 0) { + for (const input of inputs_waiting) { + if (dep.inputs.includes(input)) { + add_new_message(WAITING_FOR_INPUTS_MESSAGE, "warning"); + return; + } + } + } const current_status = loading_status.get_status_for_fn(dep_index); messages = messages.filter(({ fn_index }) => fn_index !== dep_index); if (current_status === "pending" || current_status === "generating") { @@ -223,7 +254,11 @@ let payload: Payload = { fn_index: dep_index, - data: await Promise.all(dep.inputs.map((id) => get_data(id))), + data: await Promise.all( + dep.inputs.map((id) => + get_component_value_or_event_data(id, trigger_id, event_data) + ) + ), event_data: dep.collects_event_data ? event_data : null, trigger_id: trigger_id }; @@ -259,24 +294,49 @@ function trigger_prediction(dep: Dependency, payload: Payload): void { if (dep.trigger_mode === "once") { - if (!dep.pending_request) make_prediction(payload); + if (!dep.pending_request) + make_prediction(payload, dep.connection == "stream"); } else if (dep.trigger_mode === "multiple") { - make_prediction(payload); + make_prediction(payload, dep.connection == "stream"); } else if (dep.trigger_mode === "always_last") { if (!dep.pending_request) { - make_prediction(payload); + make_prediction(payload, dep.connection == "stream"); } else { dep.final_event = payload; } } } - async function make_prediction(payload: Payload): Promise { + async function make_prediction( + payload: Payload, + streaming = false + ): Promise { if (api_recorder_visible) { api_calls = [...api_calls, JSON.parse(JSON.stringify(payload))]; } let submission: ReturnType; + app.set_current_payload(payload); + if (streaming) { + if (!submit_map.has(dep_index)) { + dep.inputs.forEach((id) => modify_stream(id, "waiting")); + } else if ( + submit_map.has(dep_index) && + dep.inputs.some((id) => get_stream_state(id) === "waiting") + ) { + return; + } else if ( + submit_map.has(dep_index) && + dep.inputs.some((id) => get_stream_state(id) === 
"open") + ) { + await app.send_ws_message( + // @ts-ignore + `${app.config.root + app.config.api_prefix}/stream/${submit_map.get(dep_index).event_id()}`, + { ...payload, session_hash: app.session_hash } + ); + return; + } + } try { submission = app.submit( payload.fn_index, @@ -316,7 +376,7 @@ const { data, fn_index } = message; if (dep.pending_request && dep.final_event) { dep.pending_request = false; - make_prediction(dep.final_event); + make_prediction(dep.final_event, dep.connection == "stream"); } dep.pending_request = false; handle_update(data, fn_index); @@ -360,11 +420,34 @@ ]; } + function open_stream_events( + status: StatusMessage, + id: number, + dep: Dependency + ): void { + if ( + status.original_msg === "process_starts" && + dep.connection === "stream" + ) { + modify_stream(id, "open"); + } + } + + /* eslint-disable complexity */ function handle_status_update(message: StatusMessage): void { const { fn_index, ...status } = message; + if (status.stage === "streaming" && status.time_limit) { + dep.inputs.forEach((id) => { + set_time_limit(id, status.time_limit); + }); + } + dep.inputs.forEach((id) => { + open_stream_events(message, id, dep); + }); //@ts-ignore loading_status.update({ ...status, + time_limit: status.time_limit, status: status.stage, progress: status.progress_data, fn_index @@ -397,7 +480,7 @@ ]; } - if (status.stage === "complete") { + if (status.stage === "complete" || status.stage === "generating") { status.changed_state_ids?.forEach((id) => { dependencies .filter((dep) => dep.targets.some(([_id, _]) => _id === id)) @@ -405,13 +488,17 @@ wait_then_trigger_api_call(dep.id, payload.trigger_id); }); }); + } + if (status.stage === "complete") { dependencies.forEach(async (dep) => { if (dep.trigger_after === fn_index) { wait_then_trigger_api_call(dep.id, payload.trigger_id); } }); - - // submission.destroy(); + dep.inputs.forEach((id) => { + modify_stream(id, "closed"); + }); + submit_map.delete(dep_index); } if (status.broken && 
is_mobile_device && user_left_page) { window.setTimeout(() => { @@ -451,6 +538,7 @@ } } } + /* eslint-enable complexity */ function trigger_share(title: string | undefined, description: string): void { if (space_id === null) { @@ -503,12 +591,18 @@ } }); - if (render_complete) return; + if (!target || render_complete) return; target.addEventListener("prop_change", (e: Event) => { if (!isCustomEvent(e)) throw new Error("not a custom event"); const { id, prop, value } = e.detail; update_value([{ id, prop, value }]); + if (prop === "input_ready" && value === false) { + inputs_waiting.push(id); + } + if (prop === "input_ready" && value === true) { + inputs_waiting = inputs_waiting.filter((item) => item !== id); + } }); target.addEventListener("gradio", (e: Event) => { if (!isCustomEvent(e)) throw new Error("not a custom event"); @@ -522,6 +616,16 @@ messages = [new_message(data, -1, event), ...messages]; } else if (event == "clear_status") { update_status(id, "complete", data); + } else if (event == "close_stream") { + const deps = $targets[id]?.[data]; + deps?.forEach((dep_id) => { + if (submit_map.has(dep_id)) { + // @ts-ignore + const url = `${app.config.root + app.config.api_prefix}/stream/${submit_map.get(dep_id).event_id()}`; + app.post_data(`${url}/close`, {}); + app.close_ws(url); + } + }); } else { const deps = $targets[id]?.[event]; @@ -592,12 +696,28 @@ function isCustomEvent(event: Event): event is CustomEvent { return "detail" in event; } + + onMount(() => { + document.addEventListener("visibilitychange", function () { + if (document.visibilityState === "hidden") { + user_left_page = true; + } + }); + + is_mobile_device = + /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test( + navigator.userAgent + ); + }); {#if control_page_title} {title} {/if} + {#if css} + {@html `\${prefix_css(css, version)}`} + {/if}
@@ -611,7 +731,7 @@ on:mount={handle_mount} {version} {autoscroll} - max_file_size={app.config.max_file_size} + {max_file_size} client={app} /> {/if} diff --git a/js/core/src/Login.svelte b/js/core/src/Login.svelte index 03a0ea6d07107..fd001d5192099 100644 --- a/js/core/src/Login.svelte +++ b/js/core/src/Login.svelte @@ -50,6 +50,7 @@
- +{#if rootNode} + +{/if} diff --git a/js/core/src/Render.svelte b/js/core/src/Render.svelte index f79e619cd5cc9..9075c3b6d9af6 100644 --- a/js/core/src/Render.svelte +++ b/js/core/src/Render.svelte @@ -36,20 +36,24 @@ }; }); - $: node.children = - node.children && - node.children.filter((v) => { - const valid_node = node.type !== "statustracker"; - if (!valid_node) { - filtered_children.push(v); - } - return valid_node; - }); + $: { + if (node) { + node.children = + node.children && + node.children.filter((v) => { + const valid_node = node.type !== "statustracker"; + if (!valid_node) { + filtered_children.push(v); + } + return valid_node; + }); + } + } setContext("BLOCK_KEY", parent); $: { - if (node.type === "form") { + if (node && node.type === "form") { if (node.children?.every((c) => !c.props.visible)) { node.props.visible = false; } else { @@ -58,7 +62,7 @@ } } - $: gradio_class = new Gradio>( + $: node.props.gradio = new Gradio>( node.id, target, theme_mode, @@ -73,7 +77,7 @@ {#if node.children && node.children.length} {#each node.children as _node (_node.id)} diff --git a/js/core/src/RenderComponent.svelte b/js/core/src/RenderComponent.svelte index 10e515d2babb1..e3987ba494468 100644 --- a/js/core/src/RenderComponent.svelte +++ b/js/core/src/RenderComponent.svelte @@ -13,7 +13,7 @@ export let theme_mode: ThemeMode; export let instance: ComponentMeta["instance"]; export let value: any; - export let gradio: Gradio; + // export let gradio: Gradio; export let elem_id: string; export let elem_classes: string[]; export let _id: number; @@ -32,6 +32,7 @@ function report(props: string) { return function (propargs: any) { + if (!target) return; const ev = s(_id, props, propargs); target.dispatchEvent(ev); }; @@ -61,7 +62,6 @@ {...$$restProps} {theme_mode} {root} - {gradio} > diff --git a/js/core/src/api_docs/ApiDocs.svelte b/js/core/src/api_docs/ApiDocs.svelte index 6999b716503b0..495690e304373 100644 --- a/js/core/src/api_docs/ApiDocs.svelte +++ 
b/js/core/src/api_docs/ApiDocs.svelte @@ -59,7 +59,9 @@ named_endpoints: any; unnamed_endpoints: any; }> { - let response = await fetch(root + "info"); + let response = await fetch( + root.replace(/\/$/, "") + app.api_prefix + "/info" + ); let data = await response.json(); return data; } @@ -235,6 +237,7 @@ {root} {space_id} {username} + api_prefix={app.api_prefix} /> blob_components.includes(param.component) ); + + $: normalised_api_prefix = api_prefix ? api_prefix : "/"; + $: normalised_root = root.replace(/\/$/, "");
@@ -135,7 +139,7 @@ console.log(result.data);
-
curl -X POST {root}call/{dependency.api_name} -s -H "Content-Type: application/json" -d '{"{"}
+					
curl -X POST {normalised_root}{normalised_api_prefix}/call/{dependency.api_name} -s -H "Content-Type: application/json" -d '{"{"}
   "data": [{#each endpoint_parameters as { label, parameter_name, type, python_type, component, example_input, serializer }, i}
 							{represent_value(
@@ -147,7 +151,7 @@ console.log(result.data);
 						{/each}
 ]{"}"}' \
   | awk -F'"' '{"{"} print $4{"}"}'  \
-  | read EVENT_ID; curl -N {root}call/{dependency.api_name}/$EVENT_ID
+ | read EVENT_ID; curl -N {normalised_root}{normalised_api_prefix}/call/{dependency.api_name}/$EVENT_ID
diff --git a/js/core/src/api_docs/ParametersSnippet.svelte b/js/core/src/api_docs/ParametersSnippet.svelte index 5ffa37774f8d4..b504bec362c76 100644 --- a/js/core/src/api_docs/ParametersSnippet.svelte +++ b/js/core/src/api_docs/ParametersSnippet.svelte @@ -41,7 +41,7 @@ The input value that is provided in the "{label}" {component} component. + -->. {python_type.description}

{/each} diff --git a/js/core/src/css.ts b/js/core/src/css.ts index d69917f61e065..ca58de5072fef 100644 --- a/js/core/src/css.ts +++ b/js/core/src/css.ts @@ -1,6 +1,7 @@ let supports_adopted_stylesheets = false; if ( + typeof window !== "undefined" && "attachShadow" in Element.prototype && "adoptedStyleSheets" in Document.prototype ) { @@ -35,9 +36,12 @@ export function mount_css(url: string, target: HTMLElement): Promise { export function prefix_css( string: string, version: string, - style_element = document.createElement("style") -): HTMLStyleElement | null { - if (!supports_adopted_stylesheets) return null; + style_element?: HTMLStyleElement +): string | null { + if (!supports_adopted_stylesheets) return string; + if (!style_element) { + style_element = document.createElement("style"); + } style_element.remove(); const stylesheet = new CSSStyleSheet(); @@ -52,7 +56,7 @@ export function prefix_css( const rules = stylesheet.cssRules; let css_string = ""; - let gradio_css_infix = `gradio-app .gradio-container.gradio-container-${version} .contain `; + let gradio_css_infix = `.gradio-container.gradio-container-${version} .contain `; for (let i = 0; i < rules.length; i++) { const rule = rules[i]; @@ -108,9 +112,5 @@ export function prefix_css( css_string += `@font-face { ${rule.style.cssText} }`; } } - css_string = importString + css_string; - style_element.textContent = css_string; - - document.head.appendChild(style_element); - return style_element; + return importString + css_string; } diff --git a/js/core/src/init.test.ts b/js/core/src/init.test.ts index d302fc161b792..755d9dd309e4c 100644 --- a/js/core/src/init.test.ts +++ b/js/core/src/init.test.ts @@ -481,14 +481,17 @@ describe("get_component", () => { const id = "test-random"; const variant = "component"; const handlers = [ - http.get(`${api_url}/custom_component/${id}/${variant}/style.css`, () => { - return new HttpResponse('console.log("boo")', { - status: 200, - headers: { - "Content-Type": "text/css" - } - 
}); - }) + http.get( + `${api_url}/custom_component/${id}/client/${variant}/style.css`, + () => { + return new HttpResponse('console.log("boo")', { + status: 200, + headers: { + "Content-Type": "text/css" + } + }); + } + ) ]; // vi.mock calls are always hoisted out of the test function to the top of the file @@ -498,7 +501,7 @@ describe("get_component", () => { }); vi.mock( - `example.com/custom_component/test-random/component/index.js`, + `example.com/custom_component/test-random/client/component/index.js`, async () => { mock(); return { diff --git a/js/core/src/init.ts b/js/core/src/init.ts index 13e3ffe1bf066..471abba704049 100644 --- a/js/core/src/init.ts +++ b/js/core/src/init.ts @@ -18,16 +18,23 @@ export interface UpdateTransaction { } let pending_updates: UpdateTransaction[][] = []; +const is_browser = typeof window !== "undefined"; +const raf = is_browser + ? requestAnimationFrame + : async (fn: () => Promise | void) => await fn(); /** * Create a store with the layout and a map of targets * @returns A store with the layout and a map of targets */ -export function create_components(): { +export function create_components(initial_layout: ComponentMeta | undefined): { layout: Writable; targets: Writable; update_value: (updates: UpdateTransaction[]) => void; get_data: (id: number) => any | Promise; + modify_stream: (id: number, state: "open" | "waiting" | "closed") => void; + get_stream_state: (id: number) => "open" | "waiting" | "closed" | "not_set"; + set_time_limit: (id: number, time_limit: number | undefined) => void; loading_status: ReturnType; scheduled_updates: Writable; create_layout: (args: { @@ -39,7 +46,7 @@ export function create_components(): { options: { fill_height: boolean; }; - }) => void; + }) => Promise; rerender_layout: (args: { render_id: number; components: ComponentMeta[]; @@ -58,13 +65,26 @@ export function create_components(): { let instance_map: { [id: number]: ComponentMeta }; let loading_status: ReturnType = 
create_loading_status_store(); - const layout_store: Writable = writable(); + const layout_store: Writable = writable(initial_layout); let _components: ComponentMeta[] = []; let app: client_return; let keyed_component_values: Record = {}; let _rootNode: ComponentMeta; - function create_layout({ + function set_event_specific_args(dependencies: Dependency[]): void { + dependencies.forEach((dep) => { + dep.targets.forEach((target) => { + const instance = instance_map[target[0]]; + if (instance && dep.event_specific_args?.length > 0) { + dep.event_specific_args?.forEach((arg: string) => { + instance.props[arg] = dep[arg as keyof Dependency]; + }); + } + }); + }); + } + + async function create_layout({ app: _app, components, layout, @@ -80,7 +100,9 @@ export function create_components(): { options: { fill_height: boolean; }; - }): void { + }): Promise { + // make sure the state is settled before proceeding + flush(); app = _app; store_keyed_values(_components); @@ -130,9 +152,10 @@ export function create_components(): { {} as { [id: number]: ComponentMeta } ); - walk_layout(layout, root).then(() => { - layout_store.set(_rootNode); - }); + await walk_layout(layout, root); + + layout_store.set(_rootNode); + set_event_specific_args(dependencies); } /** @@ -208,6 +231,8 @@ export function create_components(): { walk_layout(layout, root, current_element.parent).then(() => { layout_store.set(_rootNode); }); + + set_event_specific_args(dependencies); } async function walk_layout( @@ -301,7 +326,6 @@ export function create_components(): { } return layout; }); - pending_updates = []; update_scheduled = false; update_scheduled_store.set(false); @@ -314,12 +338,15 @@ export function create_components(): { if (!update_scheduled) { update_scheduled = true; update_scheduled_store.set(true); - requestAnimationFrame(flush); + raf(flush); } } - function get_data(id: number): any | Promise { - const comp = _component_map.get(id); + let comp = _component_map.get(id); + if (!comp) { + 
const layout = get(layout_store); + comp = findComponentById(layout, id); + } if (!comp) { return null; } @@ -329,15 +356,61 @@ export function create_components(): { return comp.props.value; } + function findComponentById( + node: ComponentMeta, + id: number + ): ComponentMeta | undefined { + if (node.id === id) { + return node; + } + if (node.children) { + for (const child of node.children) { + const result = findComponentById(child, id); + if (result) { + return result; + } + } + } + return undefined; + } + + function modify_stream( + id: number, + state: "open" | "closed" | "waiting" + ): void { + const comp = _component_map.get(id); + if (comp && comp.instance.modify_stream_state) { + comp.instance.modify_stream_state(state); + } + } + + function get_stream_state( + id: number + ): "open" | "closed" | "waiting" | "not_set" { + const comp = _component_map.get(id); + if (comp && comp.instance.get_stream_state) + return comp.instance.get_stream_state(); + return "not_set"; + } + + function set_time_limit(id: number, time_limit: number | undefined): void { + const comp = _component_map.get(id); + if (comp && comp.instance.set_time_limit) { + comp.instance.set_time_limit(time_limit); + } + } + return { layout: layout_store, targets: target_map, update_value, get_data, + modify_stream, + get_stream_state, + set_time_limit, loading_status, scheduled_updates: update_scheduled_store, - create_layout: (...args) => - requestAnimationFrame(() => create_layout(...args)), + create_layout: create_layout, rerender_layout }; } diff --git a/js/core/src/lang/en.json b/js/core/src/lang/en.json index 35d662c993602..33512377d3275 100644 --- a/js/core/src/lang/en.json +++ b/js/core/src/lang/en.json @@ -16,12 +16,14 @@ "record": "Record", "no_microphone": "No microphone found", "pause": "Pause", - "play": "Play" + "play": "Play", + "waiting": "Waiting" }, "blocks": { "connection_can_break": "On mobile, the connection can break if this tab is unfocused or the device sleeps, losing 
your position in queue.", "long_requests_queue": "There is a long queue of requests pending. Duplicate this Space to skip.", - "lost_connection": "Lost connection due to leaving page. Rejoining queue..." + "lost_connection": "Lost connection due to leaving page. Rejoining queue...", + "waiting_for_inputs": "Waiting for file(s) to finish uploading, please retry." }, "checkbox": { "checkbox": "Checkbox", @@ -53,8 +55,12 @@ }, "dataframe": { "incorrect_format": "Incorrect format, only CSV and TSV files are supported", - "new_column": "New column", - "new_row": "New row" + "new_column": "Add column", + "new_row": "New row", + "add_row_above": "Add row above", + "add_row_below": "Add row below", + "add_column_left": "Add column to the left", + "add_column_right": "Add column to the right" }, "dropdown": { "dropdown": "Dropdown" @@ -112,7 +118,7 @@ "drop_file": "Drop File Here", "drop_image": "Drop Image Here", "drop_video": "Drop Video Here", - "drop_gallery": "Drop Image(s) Here", + "drop_gallery": "Drop Media Here", "paste_clipboard": "Paste from Clipboard" } } diff --git a/js/core/src/lang/zh-CN.json b/js/core/src/lang/zh-CN.json index 4f1e080370ddb..94bef996d6417 100644 --- a/js/core/src/lang/zh-CN.json +++ b/js/core/src/lang/zh-CN.json @@ -16,7 +16,8 @@ "record": "录制", "no_microphone": "找不到麦克风", "pause": "暂停", - "play": "播放" + "play": "播放", + "waiting": "等待" }, "blocks": { "connection_can_break": "在移动设备上,如果此标签页失去焦点或设备休眠,连接可能会中断,导致您在队列中失去位置。", diff --git a/js/core/src/stores.ts b/js/core/src/stores.ts index cb54d930aa80e..197b85a793eb8 100644 --- a/js/core/src/stores.ts +++ b/js/core/src/stores.ts @@ -2,7 +2,7 @@ import { type Writable, writable, get } from "svelte/store"; export interface LoadingStatus { eta: number | null; - status: "pending" | "error" | "complete" | "generating"; + status: "pending" | "error" | "complete" | "generating" | "streaming"; queue: boolean; queue_position: number | null; queue_size?: number; @@ -10,6 +10,7 @@ export interface 
LoadingStatus { message?: string | null; scroll_to_output?: boolean; show_progress?: "full" | "minimal" | "hidden"; + time_limit?: number | null | undefined; progress?: { progress: number | null; index: number | null; @@ -48,7 +49,8 @@ export function create_loading_status_store(): LoadingStatusStore { position = null, eta = null, message = null, - progress + progress, + time_limit = null }: { fn_index: LoadingStatus["fn_index"]; status: LoadingStatus["status"]; @@ -58,6 +60,7 @@ export function create_loading_status_store(): LoadingStatusStore { eta?: LoadingStatus["eta"]; message?: LoadingStatus["message"]; progress?: LoadingStatus["progress"]; + time_limit?: LoadingStatus["time_limit"]; }): void { const outputs = fn_outputs[fn_index]; const inputs = fn_inputs[fn_index]; diff --git a/js/core/src/types.ts b/js/core/src/types.ts index 21711fa6b2a10..07827c74ae1a6 100644 --- a/js/core/src/types.ts +++ b/js/core/src/types.ts @@ -69,6 +69,11 @@ export interface Dependency { final_event: Payload | null; show_api: boolean; rendered_in: number | null; + connection: "stream" | "sse"; + time_limit: number; + stream_every: number; + like_user_message: boolean; + event_specific_args: string[]; } interface TypeDescription { diff --git a/js/dataframe/CHANGELOG.md b/js/dataframe/CHANGELOG.md index e7875ae36182b..6124a871bd6c5 100644 --- a/js/dataframe/CHANGELOG.md +++ b/js/dataframe/CHANGELOG.md @@ -1,11 +1,103 @@ # @gradio/dataframe -## 0.10.1 +## 0.11.0-beta.8 + +### Features + +- [#9601](https://github.com/gradio-app/gradio/pull/9601) [`c078892`](https://github.com/gradio-app/gradio/commit/c07889223cb64661b17560b707b977248809470a) - Tweak gr.Dataframe menu UX. Thanks @hannahblair! +- [#9575](https://github.com/gradio-app/gradio/pull/9575) [`4ec2feb`](https://github.com/gradio-app/gradio/commit/4ec2feb04e452d2c77482c09543c59948567be67) - Update gr.Dataframe UI with action popover. Thanks @hannahblair! 
+ +### Dependency updates + +- @gradio/upload@0.13.0-beta.7 +- @gradio/statustracker@0.8.0-beta.5 +- @gradio/atoms@0.9.0-beta.5 +- @gradio/button@0.3.0-beta.7 +- @gradio/markdown@0.10.0-beta.5 + +## 0.11.0-beta.7 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.4 +- @gradio/atoms@0.9.0-beta.4 +- @gradio/client@1.6.0-beta.4 +- @gradio/upload@0.13.0-beta.6 +- @gradio/markdown@0.10.0-beta.4 +- @gradio/button@0.3.0-beta.6 + +## 0.11.0-beta.6 + +### Features + +- [#9496](https://github.com/gradio-app/gradio/pull/9496) [`1647ebd`](https://github.com/gradio-app/gradio/commit/1647ebddc3e2ed6fc143a62629409e32afcc5801) - UI theme fixes. Thanks @aliabid94! + +### Dependency updates + +- @gradio/upload@0.13.0-beta.5 +- @gradio/statustracker@0.8.0-beta.3 +- @gradio/atoms@0.9.0-beta.3 +- @gradio/button@0.3.0-beta.5 +- @gradio/markdown@0.10.0-beta.3 + +## 0.11.0-beta.5 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.2 +- @gradio/upload@0.13.0-beta.4 +- @gradio/button@0.3.0-beta.4 +- @gradio/markdown@0.10.0-beta.2 + +## 0.11.0-beta.4 + +### Features + +- [#9469](https://github.com/gradio-app/gradio/pull/9469) [`f7c3396`](https://github.com/gradio-app/gradio/commit/f7c3396f55a5b8364d3880a29d766bd092d7f840) - Fix. Triggered dataframe change event for header change. Thanks @Joodith! + +## 0.11.0-beta.3 + +### Dependency updates + +- @gradio/upload@0.13.0-beta.3 +- @gradio/client@1.6.0-beta.3 +- @gradio/button@0.3.0-beta.3 + +## 0.11.0-beta.2 + +### Features + +- [#9313](https://github.com/gradio-app/gradio/pull/9313) [`1fef9d9`](https://github.com/gradio-app/gradio/commit/1fef9d9a26f0ebce4de18c486702661f6539b1c6) - Standardize `height` across components and add `max_height` and `min_height` parameters where appropriate. Thanks @abidlabs! +- [#9339](https://github.com/gradio-app/gradio/pull/9339) [`4c8c6f2`](https://github.com/gradio-app/gradio/commit/4c8c6f2fe603081941c5fdc43f48a0632b9f31ad) - Ssr part 2. Thanks @pngwn! 
+ +### Dependency updates + +- @gradio/atoms@0.9.0-beta.2 +- @gradio/upload@0.13.0-beta.2 +- @gradio/markdown@0.10.0-beta.2 +- @gradio/client@1.6.0-beta.2 +- @gradio/statustracker@0.8.0-beta.2 +- @gradio/utils@0.7.0-beta.2 +- @gradio/button@0.3.0-beta.2 + +## 0.10.1-beta.1 ### Features - [#9187](https://github.com/gradio-app/gradio/pull/9187) [`5bf00b7`](https://github.com/gradio-app/gradio/commit/5bf00b7524ebf399b48719120a49d15bb21bd65c) - make all component SSR compatible. Thanks @pngwn! +### Dependency updates + +- @gradio/atoms@0.8.1-beta.1 +- @gradio/statustracker@0.8.0-beta.1 +- @gradio/utils@0.7.0-beta.1 +- @gradio/client@1.6.0-beta.1 +- @gradio/upload@0.12.4-beta.1 +- @gradio/markdown@0.9.4-beta.1 +- @gradio/button@0.3.0-beta.1 + +## 0.10.1-beta.0 + ### Fixes - [#9163](https://github.com/gradio-app/gradio/pull/9163) [`2b6cbf2`](https://github.com/gradio-app/gradio/commit/2b6cbf25908e42cf027324e54ef2cc0baad11a91) - fix exports and generate types. Thanks @pngwn! diff --git a/js/dataframe/Dataframe.stories.svelte b/js/dataframe/Dataframe.stories.svelte index 818a3094dcce5..2746617c8ff6b 100644 --- a/js/dataframe/Dataframe.stories.svelte +++ b/js/dataframe/Dataframe.stories.svelte @@ -1,6 +1,10 @@ + + { + const canvas = within(canvasElement); + + const cell = canvas.getByText("200"); + userEvent.click(cell); + const open_dialog_btn = canvas.getAllByText("⋮"); + await userEvent.click(open_dialog_btn[0]); + + const add_row_btn = canvas.getByText("Add row above"); + await userEvent.click(add_row_btn); + }} +/> diff --git a/js/dataframe/Index.svelte b/js/dataframe/Index.svelte index b2075cf77dc63..3d71cead10437 100644 --- a/js/dataframe/Index.svelte +++ b/js/dataframe/Index.svelte @@ -45,7 +45,7 @@ right: string; display: boolean; }[]; - export let height: number | undefined = undefined; + export let max_height: number | undefined = undefined; export let loading_status: LoadingStatus; export let interactive: boolean; @@ -149,11 +149,11 @@ {datatype} 
{latex_delimiters} editable={interactive} - {height} + {max_height} i18n={gradio.i18n} {line_breaks} {column_widths} - upload={gradio.client.upload} - stream_handler={gradio.client.stream} + upload={(...args) => gradio.client.upload(...args)} + stream_handler={(...args) => gradio.client.stream(...args)} /> diff --git a/js/dataframe/package.json b/js/dataframe/package.json index c0be10e65733b..e348f3082e7b4 100644 --- a/js/dataframe/package.json +++ b/js/dataframe/package.json @@ -1,6 +1,6 @@ { "name": "@gradio/dataframe", - "version": "0.10.1", + "version": "0.11.0-beta.8", "description": "Gradio UI packages", "type": "module", "author": "", diff --git a/js/dataframe/shared/Arrow.svelte b/js/dataframe/shared/Arrow.svelte new file mode 100644 index 0000000000000..52000514b5a61 --- /dev/null +++ b/js/dataframe/shared/Arrow.svelte @@ -0,0 +1,10 @@ + + + + + diff --git a/js/dataframe/shared/CellMenu.svelte b/js/dataframe/shared/CellMenu.svelte new file mode 100644 index 0000000000000..9ba1aaa3093fb --- /dev/null +++ b/js/dataframe/shared/CellMenu.svelte @@ -0,0 +1,111 @@ + + +
+ {#if !is_header} + + + {/if} + + +
+ + diff --git a/js/dataframe/shared/Table.svelte b/js/dataframe/shared/Table.svelte index 58e25e6fab115..72306a469854b 100644 --- a/js/dataframe/shared/Table.svelte +++ b/js/dataframe/shared/Table.svelte @@ -4,19 +4,14 @@ import { dequal } from "dequal/lite"; import { copy } from "@gradio/utils"; import { Upload } from "@gradio/upload"; - import { BaseButton } from "@gradio/button"; + import EditableCell from "./EditableCell.svelte"; import type { SelectData } from "@gradio/utils"; import type { I18nFormatter } from "js/core/src/gradio_helper"; import { type Client } from "@gradio/client"; import VirtualTable from "./VirtualTable.svelte"; - import type { - Headers, - HeadersWithIDs, - Data, - Metadata, - Datatype - } from "./utils"; + import type { Headers, HeadersWithIDs, Metadata, Datatype } from "./utils"; + import CellMenu from "./CellMenu.svelte"; export let datatype: Datatype | Datatype[]; export let label: string | null = null; @@ -36,7 +31,7 @@ export let root: string; export let i18n: I18nFormatter; - export let height = 500; + export let max_height = 500; export let line_breaks = true; export let column_widths: string[] = []; export let upload: Client["upload"]; @@ -64,7 +59,7 @@ $: { if (selected !== false) { const [row, col] = selected; - if (!isNaN(row) && !isNaN(col)) { + if (!isNaN(row) && !isNaN(col) && data[row]) { dispatch("select", { index: [row, col], value: get_data_at(row, col), @@ -346,7 +341,14 @@ } } + let active_cell: { row: number; col: number } | null = null; + async function handle_cell_click(i: number, j: number): Promise { + if (active_cell && active_cell.row === i && active_cell.col === j) { + active_cell = null; + } else { + active_cell = { row: i, col: j }; + } if (dequal(editing, [i, j])) return; header_edit = false; selected_header = false; @@ -410,35 +412,39 @@ return; } - data.splice( - index ? 
index + 1 : data.length, - 0, - Array(data[0].length) - .fill(0) - .map((_, i) => { - const _id = make_id(); + const new_row = Array(data[0].length) + .fill(0) + .map((_, i) => { + const _id = make_id(); + els[_id] = { cell: null, input: null }; + return { id: _id, value: "" }; + }); - els[_id] = { cell: null, input: null }; - return { id: _id, value: "" }; - }) - ); + if (index !== undefined && index >= 0 && index <= data.length) { + data.splice(index, 0, new_row); + } else { + data.push(new_row); + } data = data; - selected = [index ? index + 1 : data.length - 1, 0]; + selected = [index !== undefined ? index : data.length - 1, 0]; } - $: data && trigger_change(); + $: (data || selected_header) && trigger_change(); - async function add_col(): Promise { + async function add_col(index?: number): Promise { parent.focus(); if (col_count[1] !== "dynamic") return; + + const insert_index = index !== undefined ? index : data[0].length; + for (let i = 0; i < data.length; i++) { const _id = make_id(); els[_id] = { cell: null, input: null }; - data[i].push({ id: _id, value: "" }); + data[i].splice(insert_index, 0, { id: _id, value: "" }); } - headers.push(`Header ${headers.length + 1}`); + headers.splice(insert_index, 0, `Header ${headers.length + 1}`); data = data; headers = headers; @@ -446,13 +452,23 @@ await tick(); requestAnimationFrame(() => { - edit_header(headers.length - 1, true); + edit_header(insert_index, true); const new_w = parent.querySelectorAll("tbody")[1].offsetWidth; parent.querySelectorAll("table")[1].scrollTo({ left: new_w }); }); } function handle_click_outside(event: Event): void { + if ( + (active_cell_menu && + !(event.target as HTMLElement).closest(".cell-menu")) || + (active_header_menu && + !(event.target as HTMLElement).closest(".cell-menu")) + ) { + active_cell_menu = null; + active_header_menu = null; + } + event.stopImmediatePropagation(); const [trigger] = event.composedPath() as HTMLElement[]; if (parent.contains(trigger)) { @@ -463,6 +479,9 
@@ header_edit = false; selected_header = false; selected = false; + active_cell = null; + active_cell_menu = null; + active_header_menu = null; } function guess_delimitaor( @@ -564,7 +583,7 @@ } let table_height: number = - values.slice(0, (height / values.length) * 37).length * 37 + 37; + values.slice(0, (max_height / values.length) * 37).length * 37 + 37; let scrollbar_width = 0; function sort_data( @@ -640,6 +659,107 @@ observer.disconnect(); }; }); + + let highlighted_column: number | null = null; + + let active_cell_menu: { + row: number; + col: number; + x: number; + y: number; + } | null = null; + + function toggle_cell_menu(event: MouseEvent, row: number, col: number): void { + event.stopPropagation(); + if ( + active_cell_menu && + active_cell_menu.row === row && + active_cell_menu.col === col + ) { + active_cell_menu = null; + } else { + const cell = (event.target as HTMLElement).closest("td"); + if (cell) { + const rect = cell.getBoundingClientRect(); + active_cell_menu = { + row, + col, + x: rect.right, + y: rect.bottom + }; + } + } + } + + function add_row_at(index: number, position: "above" | "below"): void { + const row_index = position === "above" ? index : index + 1; + add_row(row_index); + active_cell_menu = null; + active_header_menu = null; + } + + function add_col_at(index: number, position: "left" | "right"): void { + const col_index = position === "left" ? 
index : index + 1; + add_col(col_index); + active_cell_menu = null; + active_header_menu = null; + } + + onMount(() => { + document.addEventListener("click", handle_click_outside); + return () => { + document.removeEventListener("click", handle_click_outside); + }; + }); + + let active_button: { + type: "header" | "cell"; + row?: number; + col: number; + } | null = null; + + function toggle_header_button(col: number): void { + if (active_button?.type === "header" && active_button.col === col) { + active_button = null; + } else { + active_button = { type: "header", col }; + } + } + + function toggle_cell_button(row: number, col: number): void { + if ( + active_button?.type === "cell" && + active_button.row === row && + active_button.col === col + ) { + active_button = null; + } else { + active_button = { type: "cell", row, col }; + } + } + + let active_header_menu: { + col: number; + x: number; + y: number; + } | null = null; + + function toggle_header_menu(event: MouseEvent, col: number): void { + event.stopPropagation(); + if (active_header_menu && active_header_menu.col === col) { + active_header_menu = null; + } else { + const header = (event.target as HTMLElement).closest("th"); + if (header) { + const rect = header.getBoundingClientRect(); + active_header_menu = { + col, + x: rect.right, + y: rect.bottom + }; + } + } + } { + toggle_header_button(i); + }} >
- edit_header(i)} - {select_on_focus} - header - {root} - /> - - - - -
handle_sort(i)} - > - + edit_header(i)} + {select_on_focus} + header + {root} + /> + + + +
{ + event.stopPropagation(); + handle_sort(i); + }} > - - + + + +
+ + {#if editable} + + {/if}
{/each} @@ -802,11 +940,17 @@ start_edit(index, j)} - on:click={() => handle_cell_click(index, j)} + on:click={() => { + handle_cell_click(index, j); + toggle_cell_button(index, j); + }} on:dblclick={() => start_edit(index, j)} style:width="var(--cell-width-{j})" style={styling?.[index]?.[j] || ""} class:focus={dequal(selected, [index, j])} + class:menu-active={active_cell_menu && + active_cell_menu.row === index && + active_cell_menu.col === j} >
+ {#if editable} + + {/if}
{/each} @@ -829,64 +984,35 @@
- {#if editable} -
- {#if row_count[1] === "dynamic"} - - (e.stopPropagation(), add_row())} - > - - {i18n("dataframe.new_row")} - - - {/if} - {#if col_count[1] === "dynamic"} - - (e.stopPropagation(), add_col())} - > - - {i18n("dataframe.new_column")} - - - {/if} -
- {/if}
+{#if active_cell_menu !== null} + add_row_at(active_cell_menu?.row ?? -1, "above")} + on_add_row_below={() => add_row_at(active_cell_menu?.row ?? -1, "below")} + on_add_column_left={() => add_col_at(active_cell_menu?.col ?? -1, "left")} + on_add_column_right={() => add_col_at(active_cell_menu?.col ?? -1, "right")} + /> +{/if} + +{#if active_header_menu !== null} + add_col_at(active_header_menu?.col ?? -1, "left")} + on_add_column_right={() => + add_col_at(active_header_menu?.col ?? -1, "right")} + /> +{/if} + diff --git a/js/dataframe/shared/VirtualTable.svelte b/js/dataframe/shared/VirtualTable.svelte index 1aebc44f76650..ecc47252ef2bb 100644 --- a/js/dataframe/shared/VirtualTable.svelte +++ b/js/dataframe/shared/VirtualTable.svelte @@ -341,7 +341,6 @@ top: 0; left: 0; z-index: var(--layer-1); - box-shadow: var(--shadow-drop); overflow: hidden; } diff --git a/js/dataset/CHANGELOG.md b/js/dataset/CHANGELOG.md index b632c49c27791..cc449a6b1e162 100644 --- a/js/dataset/CHANGELOG.md +++ b/js/dataset/CHANGELOG.md @@ -1,11 +1,70 @@ # @gradio/dataset -## 0.2.5 +## 0.2.5-beta.7 + +### Dependency updates + +- @gradio/upload@0.13.0-beta.7 +- @gradio/atoms@0.9.0-beta.5 +- @gradio/textbox@0.7.0-beta.5 + +## 0.2.5-beta.6 + +### Dependency updates + +- @gradio/atoms@0.9.0-beta.4 +- @gradio/client@1.6.0-beta.4 +- @gradio/upload@0.13.0-beta.6 +- @gradio/textbox@0.7.0-beta.4 + +## 0.2.5-beta.5 + +### Dependency updates + +- @gradio/upload@0.13.0-beta.5 +- @gradio/atoms@0.9.0-beta.3 +- @gradio/textbox@0.7.0-beta.3 + +## 0.2.5-beta.4 + +### Dependency updates + +- @gradio/upload@0.13.0-beta.4 +- @gradio/textbox@0.7.0-beta.2 + +## 0.2.5-beta.3 + +### Dependency updates + +- @gradio/upload@0.13.0-beta.3 +- @gradio/client@1.6.0-beta.3 + +## 0.2.5-beta.2 + +### Dependency updates + +- @gradio/atoms@0.9.0-beta.2 +- @gradio/upload@0.13.0-beta.2 +- @gradio/client@1.6.0-beta.2 +- @gradio/utils@0.7.0-beta.2 +- @gradio/textbox@0.7.0-beta.2 + +## 0.2.5-beta.1 ### Features - 
[#9187](https://github.com/gradio-app/gradio/pull/9187) [`5bf00b7`](https://github.com/gradio-app/gradio/commit/5bf00b7524ebf399b48719120a49d15bb21bd65c) - make all component SSR compatible. Thanks @pngwn! +### Dependency updates + +- @gradio/atoms@0.8.1-beta.1 +- @gradio/utils@0.7.0-beta.1 +- @gradio/client@1.6.0-beta.1 +- @gradio/upload@0.12.4-beta.1 +- @gradio/textbox@0.7.0-beta.1 + +## 0.2.5-beta.0 + ### Fixes - [#9163](https://github.com/gradio-app/gradio/pull/9163) [`2b6cbf2`](https://github.com/gradio-app/gradio/commit/2b6cbf25908e42cf027324e54ef2cc0baad11a91) - fix exports and generate types. Thanks @pngwn! diff --git a/js/dataset/package.json b/js/dataset/package.json index a7433162d9de4..1f3aef53e0c17 100644 --- a/js/dataset/package.json +++ b/js/dataset/package.json @@ -1,6 +1,6 @@ { "name": "@gradio/dataset", - "version": "0.2.5", + "version": "0.2.5-beta.7", "description": "Gradio UI packages", "type": "module", "author": "", diff --git a/js/datetime/CHANGELOG.md b/js/datetime/CHANGELOG.md index 2d2150c407ccf..be7a36b1ad9d3 100644 --- a/js/datetime/CHANGELOG.md +++ b/js/datetime/CHANGELOG.md @@ -1,5 +1,60 @@ # @gradio/datetime +## 0.2.0-beta.5 + +### Features + +- [#9437](https://github.com/gradio-app/gradio/pull/9437) [`c3d93be`](https://github.com/gradio-app/gradio/commit/c3d93bef94b9401747a363f7bad88a1d347d535b) - Adding new themes to Gradio 5.0. Thanks @allisonwhilden! + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.5 +- @gradio/icons@0.8.0-beta.4 +- @gradio/atoms@0.9.0-beta.5 + +## 0.2.0-beta.4 + +### Features + +- [#9521](https://github.com/gradio-app/gradio/pull/9521) [`06ef22e`](https://github.com/gradio-app/gradio/commit/06ef22e83cdd27e7afb381396d153d9db3dea16e) - Allow `info=` to render markdown. Thanks @dawoodkhan82! 
+ +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.4 +- @gradio/atoms@0.9.0-beta.4 + +## 0.1.4-beta.3 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.3 +- @gradio/icons@0.8.0-beta.3 +- @gradio/atoms@0.9.0-beta.3 + +## 0.1.4-beta.2 + +### Dependency updates + +- @gradio/statustracker@0.8.0-beta.2 + +## 0.1.4-beta.2 + +### Dependency updates + +- @gradio/atoms@0.9.0-beta.2 +- @gradio/icons@0.8.0-beta.2 +- @gradio/statustracker@0.8.0-beta.2 +- @gradio/utils@0.7.0-beta.2 + +## 0.1.4-beta.1 + +### Dependency updates + +- @gradio/atoms@0.8.1-beta.1 +- @gradio/icons@0.8.0-beta.1 +- @gradio/statustracker@0.8.0-beta.1 +- @gradio/utils@0.7.0-beta.1 + ## 0.1.4 ### Fixes diff --git a/js/datetime/Index.svelte b/js/datetime/Index.svelte index a5f401219fb6d..65adfe6b03785 100644 --- a/js/datetime/Index.svelte +++ b/js/datetime/Index.svelte @@ -21,6 +21,7 @@ let old_value = value; export let scale: number | null = null; export let min_width: number | undefined = undefined; + export let root: string; export let include_time = true; $: if (value !== old_value) { @@ -84,7 +85,7 @@ padding={true} >
- {label} + {label}
{ await expect(item.value).toBe("apple_choice"); }); - test("ensure dropdown can have an empty value", async () => { + test("ensure dropdown can have the first item of the choices as a default value", async () => { const { getByLabelText } = await render(Dropdown, { show_label: true, loading_status, @@ -488,7 +488,7 @@ describe("Dropdown", () => { const item: HTMLInputElement = getByLabelText( "Dropdown" ) as HTMLInputElement; - await expect(item.value).toBe(""); + await expect(item.value).toBe("apple_choice"); }); test("ensure dropdown works when initial value is undefined and allow custom value is set", async () => { @@ -508,6 +508,6 @@ describe("Dropdown", () => { const item: HTMLInputElement = getByLabelText( "Dropdown" ) as HTMLInputElement; - await expect(item.value).toBe(""); + await expect(item.value).toBe("apple_choice"); }); }); diff --git a/js/dropdown/package.json b/js/dropdown/package.json index fbfe925fd754b..14e93b5468b5f 100644 --- a/js/dropdown/package.json +++ b/js/dropdown/package.json @@ -1,6 +1,6 @@ { "name": "@gradio/dropdown", - "version": "0.7.13", + "version": "0.8.0-beta.6", "description": "Gradio UI packages", "type": "module", "author": "", diff --git a/js/dropdown/shared/Dropdown.svelte b/js/dropdown/shared/Dropdown.svelte index 5acb8948a853c..e64d72252f4a4 100644 --- a/js/dropdown/shared/Dropdown.svelte +++ b/js/dropdown/shared/Dropdown.svelte @@ -6,18 +6,21 @@ import type { SelectData, KeyUpData } from "@gradio/utils"; import { handle_filter, handle_change, handle_shared_keys } from "./utils"; + type Item = string | number; + export let label: string; export let info: string | undefined = undefined; - export let value: string | number | (string | number)[] | undefined = []; - let old_value: string | number | (string | number)[] | undefined = []; + export let value: Item | Item[] | undefined = undefined; + let old_value: typeof value = undefined; export let value_is_output = false; - export let choices: [string, string | number][]; - 
let old_choices: [string, string | number][]; + export let choices: [string, Item][]; + let old_choices: typeof choices; export let disabled = false; export let show_label: boolean; export let container = true; export let allow_custom_value = false; export let filterable = true; + export let root: string; let filter_input: HTMLElement; @@ -80,12 +83,10 @@ } } - $: { - if (value != old_value) { - set_input_text(); - handle_change(dispatch, value, value_is_output); - old_value = value; - } + $: if (JSON.stringify(old_value) !== JSON.stringify(value)) { + set_input_text(); + handle_change(dispatch, value, value_is_output); + old_value = value; } function set_choice_names_values(): void { @@ -204,7 +205,7 @@
- {label} + {label}
@@ -277,6 +278,7 @@ .wrap:focus-within { box-shadow: var(--input-shadow-focus); border-color: var(--input-border-color-focus); + background: var(--input-background-fill-focus); } .wrap-inner { diff --git a/js/dropdown/shared/Multiselect.svelte b/js/dropdown/shared/Multiselect.svelte index 7c3213ac43ffe..d94532b5f31e0 100644 --- a/js/dropdown/shared/Multiselect.svelte +++ b/js/dropdown/shared/Multiselect.svelte @@ -1,26 +1,29 @@