diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index 18ecf994..282b1fd4 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -3,11 +3,6 @@ name-template: 'v$RESOLVED_VERSION' tag-template: 'v$RESOLVED_VERSION' -template: | - ## What Changed 👀 - - $CHANGES - **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION categories: - title: 🚀 Features labels: @@ -54,6 +49,7 @@ version-resolver: labels: - patch default: patch +template: | + ## Changes -exclude-labels: - - skip-changelog + $CHANGES diff --git a/CHANGELOG.md b/CHANGELOG.md index 3203e293..1dad5f69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,12 @@ All notable changes to the *readme-ai* project will be documented in this file. ## [v0.1.1] - *2023-09-24* +### 🚀 Features + +- Add CLI option to run *readme-ai* offline, generating the same README output excluding the LLM generated text. + - This option is useful for users who want to generate READMEs without an API key. + - The option can be used by passing the `--offline-mode` flag to the CLI. + ### 🛠 Changes - Refactor remaining dataclasses in [conf.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/conf.py) to Pydantic models. diff --git a/README.md b/README.md index 3a51296a..bcb28728 100644 --- a/README.md +++ b/README.md @@ -403,8 +403,9 @@ To generate a *README.md* file, use the `readmeai` command in your terminal, alo | Short Flag | Long Flag | Description | Status | |------------|----------------|---------------------------------------------------|--------------| -| `-k` | `--api-key` | Your OpenAI API secret key. | Required | +| `-k` | `--api-key` | Your OpenAI API secret key. | Optional | | `-e` | `--engine` | OpenAI GPT language model engine (gpt-3.5-turbo) | Optional | +| `-f` | `--offline-mode`| Run offline without calling the OpenAI API. | Optional | | `-o` | `--output` | The output path for your README.md file. 
| Optional | | `-r` | `--repository` | The URL or path to your code repository. | Required | | `-t` | `--temperature`| The temperature (randomness) of the model | Optional | diff --git a/pyproject.toml b/pyproject.toml index 1e4992b3..00e19d06 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "readmeai" -version = "0.3.070" +version = "0.3.071" description = "🚀 Generate beautiful README files automatically, powered by GPT-4 đŸĒ" authors = ["Eli <0x.eli.64s@gmail.com>"] license = "MIT" diff --git a/readmeai/builder.py b/readmeai/builder.py index 778e7f74..af01f9b3 100644 --- a/readmeai/builder.py +++ b/readmeai/builder.py @@ -1,6 +1,8 @@ """Builds the README Markdown file for your codebase.""" +import os import subprocess +import urllib.parse from pathlib import Path from typing import List, Tuple @@ -21,7 +23,9 @@ def build_markdown_file( readme_sections = create_markdown_sections(config, helper, packages, summaries) readme_file = "\n".join(readme_sections) readme_path = Path(config.paths.readme) + factory.FileHandler().write(readme_path, readme_file) + logger.info(f"README file generated at: {readme_path}") @@ -31,10 +35,11 @@ def create_markdown_sections( packages: list, summaries: tuple, ) -> List[str]: - """Creates each section of the README Markdown file.""" + """Constructs each section of the README file.""" name = config.git.name repository = config.git.repository user_repo = utils.get_user_repository_name(repository) + badges_path = resource_filename(__package__, f"{config.paths.badges}") badges_dict = factory.FileHandler().read(badges_path) @@ -46,11 +51,13 @@ def create_markdown_sections( if "invalid" in user_repo.lower() else markdown_badges ) - markdown_tables = create_tables( - create_markdown_tables(summaries), config.md.dropdown, user_repo - ) + markdown_setup_guide = create_setup_guide(config, helper, summaries) + if not config.api.offline_mode: + tables = 
create_markdown_tables(summaries) + config.md.tables = create_tables(tables, config.md.dropdown, user_repo) + markdown_sections = [ config.md.header, markdown_badges, @@ -58,7 +65,7 @@ def create_markdown_sections( config.md.intro, config.md.tree, config.md.modules, - markdown_tables, + config.md.tables, config.md.setup.format(name, repository, *markdown_setup_guide), config.md.ending, ] @@ -197,6 +204,31 @@ def create_table(data: List[Tuple[str, str]], user_repo_name: str) -> str: return "\n".join(formatted_lines) +def generate_code_summary_table(base_url: str, directory: Path, level=0) -> str: + """Creates a Markdown table structure for the given directory.""" + markdown = "" + markdown += "| File | Summary |\n" + markdown += "| --- | --- |\n" + + for item in sorted(directory.iterdir()): + if item.is_file(): + relative_path = os.path.relpath(item, start=directory) + url_path = urllib.parse.quote(relative_path) + full_url = urllib.parse.urljoin(base_url, url_path) + markdown += f"| [{item.name}]({full_url}) | Summary of {item.name} |\n" + + for item in sorted(directory.iterdir()): + if item.is_dir(): + # If it is a sub-directory, create a collapsible section + markdown += f"\n
{item.name}\n\n" + # Recursive call for sub-directory + markdown += generate_code_summary_table(base_url, item, level + 1) + # Close the collapsible section + markdown += "\n
\n\n" + + return markdown + + def create_directory_tree(repo_path: Path) -> str: """Creates a directory tree for the project.""" try: diff --git a/readmeai/conf.py b/readmeai/conf.py index d6a7ed12..65513a9a 100644 --- a/readmeai/conf.py +++ b/readmeai/conf.py @@ -32,6 +32,7 @@ class ApiConfig(BaseModel): tokens: int tokens_max: int temperature: float + offline_mode: bool api_key: Optional[SecretStr] = Field(default=None) @validator("api_key", pre=True, always=True) @@ -115,6 +116,7 @@ class MarkdownConfig(BaseModel): intro: str modules: str setup: str + tables: str toc: str tree: str diff --git a/readmeai/conf/conf.toml b/readmeai/conf/conf.toml index 7715df9f..067c57ec 100644 --- a/readmeai/conf/conf.toml +++ b/readmeai/conf/conf.toml @@ -7,6 +7,7 @@ rate_limit = 5 tokens = 650 tokens_max = 3800 temperature = 1.2 +offline_mode = false # Repository [git] @@ -55,9 +56,10 @@ Craft 3-4 sentences that encapsulate the core functionalities of the project, it """ slogan = "Conceptualize a catchy and memorable slogan for the GitHub project: {}. Limit your response to 80 characters." -# Markdown Templates +# Markdown Template Code [md] -default = "`ℹī¸ INSERT-DESCRIPTION`" +tables = "" +default = "â–ļī¸Ž INSERT-DESCRIPTION" dropdown = """
{}\n\n{}\n\n
\n""" header = """

diff --git a/readmeai/main.py b/readmeai/main.py index 1ef0b1ba..2e3cf70b 100755 --- a/readmeai/main.py +++ b/readmeai/main.py @@ -19,14 +19,14 @@ config_helper = conf.load_config_helper(config_model) -async def main(repository: str) -> None: +async def main(repository: str, offline: bool) -> None: """Main entrypoint for the readme-ai application.""" config.git = conf.GitConfig(repository=repository) llm = model.OpenAIHandler(config) - await generate_readme(llm) + await generate_readme(llm, offline) -async def generate_readme(llm: model.OpenAIHandler) -> None: +async def generate_readme(llm: model.OpenAIHandler, offline: bool) -> None: """Orchestrates the README file generation process.""" name = config.git.name repository = config.git.repository @@ -41,28 +41,32 @@ async def generate_readme(llm: model.OpenAIHandler) -> None: logger.info(f"Dependencies: {dependencies}") logger.info(f"Total files: {len(file_text)}") + if offline: + logger.warning("Skipping OpenAI API calls as offline mode is enabled.") + config.md.tables = builder.generate_code_summary_table(repository, temp_dir) + code_summary = config.md.tables + slogan, overview, features = ( + config.md.default, + config.md.default, + config.md.default, + ) + else: + code_summary = await generate_code_to_text(llm, file_text) + slogan, overview, features = await generate_markdown_text( + llm, repository, code_summary + ) + await llm.close() + + config.md.header = config.md.header.format(name, slogan) + config.md.intro = config.md.intro.format(overview, features) + builder.build_markdown_file(config, config_helper, dependencies, code_summary) + except Exception as excinfo: logger.error(f"Exception: {excinfo}") raise excinfo - finally: shutil.rmtree(temp_dir) - try: - code_summary, slogan, overview, features = {}, "", "", "" - code_summary = await generate_code_to_text(llm, file_text) - slogan, overview, features = await generate_markdown_text( - llm, repository, code_summary - ) - except Exception as excinfo: - 
logger.error(f"Exception: {excinfo}") - finally: - await llm.close() - - config.md.header = config.md.header.format(name, slogan) - config.md.intro = config.md.intro.format(overview, features) - builder.build_markdown_file(config, config_helper, dependencies, code_summary) - async def generate_code_to_text( llm: model.OpenAIHandler, file_text: str @@ -99,6 +103,13 @@ async def generate_markdown_text( default="gpt-3.5-turbo", help="OpenAI language model engine to use.", ) +@click.option( + "-f", + "--offline-mode", + is_flag=True, + default=False, + help="Run the tool in offline mode without calling the OpenAI API.", +) @click.option( "-o", "--output", @@ -130,6 +141,7 @@ async def generate_markdown_text( def cli( api_key: str, engine: Optional[str], + offline_mode: bool, output: Optional[str], repository: str, temperature: Optional[float], @@ -141,13 +153,14 @@ def cli( config.api.api_key = api_key config.api.engine = engine config.api.temperature = temperature + config.api.offline_mode = offline_mode logger.info("README-AI is now executing.") logger.info(f"Output file: {config.paths.readme}") logger.info(f"OpenAI Engine: {config.api.engine}") logger.info(f"OpenAI Temperature: {config.api.temperature}") - asyncio.run(main(repository)) + asyncio.run(main(repository, offline_mode)) logger.info("README-AI execution complete.")