From 3ee84df863071028d75e41deb040c5546897bebf Mon Sep 17 00:00:00 2001 From: Eric Kidd Date: Sat, 30 Mar 2024 09:08:50 -0400 Subject: [PATCH] substudy v0.6.5: Run AI requests concurrently, and cache --- Cargo.lock | 2 +- substudy/CHANGELOG.md | 8 ++++++++ substudy/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4a2dbff..42bcf97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1993,7 +1993,7 @@ checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" [[package]] name = "substudy" -version = "0.6.4" +version = "0.6.5" dependencies = [ "anyhow", "async-openai", diff --git a/substudy/CHANGELOG.md b/substudy/CHANGELOG.md index f2cbe87..b7918bc 100644 --- a/substudy/CHANGELOG.md +++ b/substudy/CHANGELOG.md @@ -6,6 +6,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), ## [Unreleased] +## [0.6.5] - 2024-03-30 + +### Added + +- Run more transcription and translation requests in parallel. This greatly reduces the time needed to work with large media files. +- Cache AI API requests. Calling an AI model is slow and costs money. Making the same calls over and over again is a waste of time and money, +especially when we successfully process 99.5% of a large media file. So now we cache recent successful requests. So if you need to re-run an incomplete translation, it should be much faster and cheaper. (Cache files are stored wherever your OS thinks they should be stored. On Linux, this is `~/.cache/substudy`.) + ## [0.6.4] - 2024-03-24 ### Added diff --git a/substudy/Cargo.toml b/substudy/Cargo.toml index ee9d1e4..696cd1f 100644 --- a/substudy/Cargo.toml +++ b/substudy/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "substudy" -version = "0.6.4" +version = "0.6.5" authors = ["Eric Kidd "] license = "Apache-2.0" edition = "2021"