From 6b275409f69892eb85fc5c2d326a90d10849f429 Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Wed, 24 Apr 2024 09:58:59 +0000
Subject: [PATCH 1/4] Add typos check to CI

---
 .github/workflows/main.yml                       | 10 ++++++++--
 atlassian_backup-1.0.0/README.md                 |  2 +-
 aws-backup-elastio-integration/lambda_handler.py |  8 ++++----
 demo-pipeline/buildspec/instance-backup.yaml     |  2 +-
 demo-pipeline/terraform/iam.tf                   |  2 +-
 elastio-api-php-lumen/app/Service/EapService.php |  4 ++--
 elastio-fargate-mysql-backup/README.md           |  2 +-
 elastio-lambda-stream-backup/readme.md           |  2 +-
 elastio-sql-backup-ssstar-stream/README.md       |  4 ++--
 elastio-stream-kafka/common.py                   |  2 +-
 elastio-stream-kafka/elastio_stream_kafka.py     | 14 +++++++-------
 elastio-velero-integration/README.md             |  2 +-
 typos.toml                                       |  8 ++++++++
 13 files changed, 38 insertions(+), 24 deletions(-)
 create mode 100644 typos.toml

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 9440639..6ebdb15 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -32,9 +32,15 @@ jobs:
           - 'aws-backup-elastio-integration/**'
           elastio-s3-changelog:
           - 'elastio-s3-changelog/**'
+
+  typos:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: crate-ci/typos@v1.20.10
+
   upload-aws-backup-elastio-integration:
-    runs-on:
-      - ubuntu-latest
+    runs-on: ubuntu-latest
     needs: changes
     if: >-
       needs.changes.outputs.aws-backup-elastio-integration == 'true' ||
diff --git a/atlassian_backup-1.0.0/README.md b/atlassian_backup-1.0.0/README.md
index 0b4f62f..657af15 100644
--- a/atlassian_backup-1.0.0/README.md
+++ b/atlassian_backup-1.0.0/README.md
@@ -35,6 +35,6 @@ export ATLASSIAN_TOKEN = "your_site_token"
 
 ### Download of large files times out.
 
-When including attachments, the download file size can get rather large. To seperate scope of conerns, download is kept seperate from protect to ensure protect only starts on successful downloads.
+When including attachments, the download file size can get rather large. To separate scope of concerns, download is kept separate from protect to ensure protect only starts on successful downloads.
 
 
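A note for reviewers on the new `typos` job above: the same check can be reproduced locally before pushing. Below is a minimal sketch, assuming the `typos` binary from crate-ci/typos is on PATH (e.g. installed with `cargo install typos-cli`) and pointed at the repo config via its `--config` flag:

```python
#!/usr/bin/env python3
"""Sketch: run the same spell check locally that the `typos` CI job runs.

Assumes the `typos` binary (crate-ci/typos) is installed, e.g. via
`cargo install typos-cli` or a GitHub release download.
"""
import subprocess
import sys

# Point typos at the repository config so local runs honor the same
# [default.extend-identifiers] entries (SIEMs, RTO) that CI uses.
result = subprocess.run(["typos", "--config", "typos.toml"])
sys.exit(result.returncode)
```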
diff --git a/aws-backup-elastio-integration/lambda_handler.py b/aws-backup-elastio-integration/lambda_handler.py
index c2d917f..322202f 100644
--- a/aws-backup-elastio-integration/lambda_handler.py
+++ b/aws-backup-elastio-integration/lambda_handler.py
@@ -61,7 +61,7 @@ def handle_aws_backup_event(event):
     tag_list = response.get('Tags')
     enable_elastio_scan = any(item in tag_list for item in ENABLE_ELASTIO_SCAN_TAG_LIST)
 
-    #Existance of Scan enable or the Lambda trigger tag
+    #Existence of Scan enable or the Lambda trigger tag
     if (enable_elastio_scan):
         elastio_status_eb = os.environ.get('ElastioStatusEB')
         if not elastio_status_eb:
@@ -140,7 +140,7 @@ def save_event_data_to_s3(s3_log_bucket,json_content):
 
 def process_ransomware_details(account_id,product_arn,generator_id,scan_timestamp,aws_asset_id,aws_backup_rp_arn,elastio_rp_id,ransomware_details):
     """
-    This is the function responsbile to create ransomware findings based on ransomware_details
+    This is the function responsible to create ransomware findings based on ransomware_details
     """
     try:
         logger.info(f'Starting process_ransomware_details')
@@ -175,7 +175,7 @@
 
 def process_malware_details(account_id,product_arn,generator_id,scan_timestamp,aws_asset_id,aws_backup_rp_arn,elastio_rp_id,malware_details):
     """
-    This is the function responsbile to create malware findings based on malware_details
+    This is the function responsible to create malware findings based on malware_details
     """
     try:
         logger.info(f'Starting process_malware_details')
@@ -386,7 +386,7 @@ def handler(event, context):
         if s3_log_bucket:
             save_event_data_to_s3(s3_log_bucket,event)
         else:
-            logger.info('S3 Log Bucket Name Env Paramter LogsBucketName is missing. Skipping logging to S3 Bucket')
+            logger.info('S3 Log Bucket Name Env Parameter LogsBucketName is missing. Skipping logging to S3 Bucket')
 
     generate_security_hub_findings(event)
 
diff --git a/demo-pipeline/buildspec/instance-backup.yaml b/demo-pipeline/buildspec/instance-backup.yaml
index bd4ffb7..59db2b1 100755
--- a/demo-pipeline/buildspec/instance-backup.yaml
+++ b/demo-pipeline/buildspec/instance-backup.yaml
@@ -18,7 +18,7 @@ phases:
     commands:
       - export INSTANCE_ID=$(aws ec2 describe-instances | jq -r --arg env $ENVIRONMENT '.Reservations[].Instances[] | select(.Tags != null) | select(.Tags[].Key == "Environment" and .Tags[].Value == $env) | .InstanceId')
       - echo "Backing up instance ${INSTANCE_ID}"
-      - OUTPUT=$(elastio ec2 backup --instance-id $INSTANCE_ID --tag relase:$CODEBUILD_RESOLVED_SOURCE_VERSION --output-format json)
+      - OUTPUT=$(elastio ec2 backup --instance-id $INSTANCE_ID --tag release:$CODEBUILD_RESOLVED_SOURCE_VERSION --output-format json)
      - echo "${OUTPUT}"
      - export JOB_ID=$(echo "${OUTPUT}" | jq -r '.job_id')
      - export ABORT_TOKEN=$(echo "${OUTPUT}" | jq -r '.abort_token')
diff --git a/demo-pipeline/terraform/iam.tf b/demo-pipeline/terraform/iam.tf
index 31f1482..a53aff3 100644
--- a/demo-pipeline/terraform/iam.tf
+++ b/demo-pipeline/terraform/iam.tf
@@ -121,7 +121,7 @@ data "aws_iam_policy_document" "codepipeline" {
   }
 
   statement {
-    sid    = "ComputeDatabaseQueueNotifcationManagementPolicy"
+    sid    = "ComputeDatabaseQueueNotificationManagementPolicy"
     effect = "Allow"
     resources = ["*"]
     actions = [
diff --git a/elastio-api-php-lumen/app/Service/EapService.php b/elastio-api-php-lumen/app/Service/EapService.php
index 0f80a34..e476512 100644
--- a/elastio-api-php-lumen/app/Service/EapService.php
+++ b/elastio-api-php-lumen/app/Service/EapService.php
@@ -209,8 +209,8 @@ public static function iScanRp($json)
 
     public static function iScanFile($request)
     {
-        $direcotry = "iscan_" . date("U") . "_" . rand(100,1000);
-        $path = "/var/www/iscan_temp/" . $direcotry;
+        $directory = "iscan_" . date("U") . "_" . rand(100,1000);
+        $path = "/var/www/iscan_temp/" . $directory;
         @mkdir($path, $mode = 0777, false);
 
         $file = $request->file('iscan_file');
diff --git a/elastio-fargate-mysql-backup/README.md b/elastio-fargate-mysql-backup/README.md
index 356a0f8..e9d6683 100644
--- a/elastio-fargate-mysql-backup/README.md
+++ b/elastio-fargate-mysql-backup/README.md
@@ -29,7 +29,7 @@
 4. Select `ElastioMySQLBackupRole` as Task role
 5. Type Elastio-CLI as container name
 6. Paste `public.ecr.aws/elastio-dev/elastio-cli:latest` in container image URI
-7. Expand Docker configuration and paste `sh,-c` in Entry point and following comman in Command:
+7. Expand Docker configuration and paste `sh,-c` in Entry point and following command in Command:
 ```
 apt-get install awscli jq default-mysql-client -y && creds=$(aws secretsmanager get-secret-value --secret-id MySQLBackupCreds | jq ".SecretString | fromjson") && mysqldump -h $(echo $creds | jq -r ".host") -u $(echo $creds | jq -r ".username") -P $(echo $creds | jq -r ".port") -p"$(echo $creds | jq -r '.password')" DATABASE | elastio stream backup --stream-name MySQL-Daily-backup --hostname-override MySQL-hostname
 ```
diff --git a/elastio-lambda-stream-backup/readme.md b/elastio-lambda-stream-backup/readme.md
index d57a60c..d0a381b 100644
--- a/elastio-lambda-stream-backup/readme.md
+++ b/elastio-lambda-stream-backup/readme.md
@@ -8,7 +8,7 @@
 
 3. Download code from contrib repository and open directory elastio-lambda-stream-backup.
 
-> if you want to use arm64/amd64 architecture, you should use the coresponding architecture for the instance
+> if you want to use arm64/amd64 architecture, you should use the corresponding architecture for the instance
 
 4. Build and push docker image:
    1. Review the dockerfile and read the following comments for arguments such as ARCH(architecture), VERSION_TAG(elastio cli version)
diff --git a/elastio-sql-backup-ssstar-stream/README.md b/elastio-sql-backup-ssstar-stream/README.md
index 8b93522..8255a6c 100644
--- a/elastio-sql-backup-ssstar-stream/README.md
+++ b/elastio-sql-backup-ssstar-stream/README.md
@@ -1,4 +1,4 @@
-This article describs the procedure of backup and restore Miscrosoft SQL server databse. If your database hosted in Amazon RDS see [Amazon RDS SQL Server](https://github.com/elastio/contrib/edit/MSSQL/elastio-sql-backup-ssstar-stream/README.md#amazon-rds-sql-server), if you have selfhosted database see [self hosted SQL Server](https://github.com/elastio/contrib/edit/MSSQL/elastio-sql-backup-ssstar-stream/README.md#self-hosted-sql-server).
+This article describes the procedure of backup and restore Microsoft SQL server database. If your database hosted in Amazon RDS see [Amazon RDS SQL Server](https://github.com/elastio/contrib/edit/MSSQL/elastio-sql-backup-ssstar-stream/README.md#amazon-rds-sql-server), if you have selfhosted database see [self hosted SQL Server](https://github.com/elastio/contrib/edit/MSSQL/elastio-sql-backup-ssstar-stream/README.md#self-hosted-sql-server).
 
 # Amazon RDS SQL Server
 
@@ -24,7 +24,7 @@ This article describs the procedure of backup and restore Miscrosoft SQL server
     exec msdb.dbo.rds_backup_database
         @source_db_name='database_name',
         @s3_arn_to_backup_to='arn:aws:s3:::bucket_name/file_name.extension',
-        [@kms_master_key_arn='arn:aws:kms:region:account-id:key/key-id'],
+        [@kms_master_key_arn='arn:aws:kms:region:account-id:key/key-id'],
         [@overwrite_s3_backup_file=0|1],
         [@type='DIFFERENTIAL|FULL'],
         [@number_of_files=n];
diff --git a/elastio-stream-kafka/common.py b/elastio-stream-kafka/common.py
index f7b6bc4..d5e3ce9 100644
--- a/elastio-stream-kafka/common.py
+++ b/elastio-stream-kafka/common.py
@@ -13,7 +13,7 @@ def id_generator(size=6, chars=string.ascii_lowercase + string.digits) -> str:
 
 def new_message_exists(topic: str, bootstrap_servers: list, partition: int, offset: int) -> dict:
     """
-    Check Kafka topic partiton for new message.
+    Check Kafka topic partition for new message.
     Function connect to Kafka, read message for the specified partition.
     Check message offset compares with the specified offset
     Returns True if the message offset is 2 greater than the specified offset.
diff --git a/elastio-stream-kafka/elastio_stream_kafka.py b/elastio-stream-kafka/elastio_stream_kafka.py
index f28ed65..20183c9 100644
--- a/elastio-stream-kafka/elastio_stream_kafka.py
+++ b/elastio-stream-kafka/elastio_stream_kafka.py
@@ -17,7 +17,7 @@
     prog="Elastio stream kafka",
 )
 subparser = parser.add_subparsers(dest="mod")
-# subparser accept two posible modes of work this script backup and restore
+# subparser accept two possible modes of work this script backup and restore
 backup_parser = subparser.add_parser("backup")
 
 # backup mode arguments
@@ -41,13 +41,13 @@
 
     topic_info_data = {}
     topic_info_data['topic_name'] = args.topic_name
-    # Creating Kafka consummer with random group.
+    # Creating Kafka consumer with random group.
     consumer = KafkaConsumer(
         group_id=f'{_id}-group',
         bootstrap_servers=bootstrap_servers,
         auto_offset_reset='earliest', # latest/earliest
         enable_auto_commit=True,
-        auto_commit_interval_ms=1000, # 1s
+        auto_commit_interval_ms=1000, # 1s
         consumer_timeout_ms=10000, # 10s
         api_version=(0, 10, 1)
     )
@@ -56,7 +56,7 @@
     Call Elastio CLI to get stream recovery points list.
     Checking if the topic was already backup.
     If the topic previously backed up set variable topic_previously_backed_up = True
-    and geting offset last message what be stored in last time.
+    and getting offset last message what be stored in last time.
     Last message offset stored in recovery point tags with name "partition__last_msg_offset".
     """
     topic_previously_backed_up = False
@@ -203,8 +203,8 @@
     msg_count = 0
     # Read data from elastio stream restore process.
     # Load to json format.
-    datas = (json.loads(line.decode()) for line in res.stdout.splitlines())
-    for data in datas:
+    lines = (json.loads(line.decode()) for line in res.stdout.splitlines())
+    for data in lines:
         # Write data to the Kafka topic.
         msg_stat = prod.send(
             topic=data['topic'],
@@ -215,4 +215,4 @@
         )
         msg_count+=1
     prod.close()
-    print("Restore finished successfuly!\nRestored messeges count: {msg_count}".format(msg_count=msg_count))
+    print("Restore finished successfully!\nRestored messages count: {msg_count}".format(msg_count=msg_count))
diff --git a/elastio-velero-integration/README.md b/elastio-velero-integration/README.md
index 64a5fb3..1e0c115 100644
--- a/elastio-velero-integration/README.md
+++ b/elastio-velero-integration/README.md
@@ -73,7 +73,7 @@ export veleroBackupName=BackupName
 export veleroS3Bucket=BucketName
 export AWS_DEFAULT_REGION=AWSRegion
 ```
-Replace `BackupName` and `BucketName` with coresponding names.
+Replace `BackupName` and `BucketName` with corresponding names.
 
 Retrieve the restored EBS volume ID and its tags using the following commands.
 
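For context on the `elastio_stream_kafka.py` hunks above: the restore path reads the `elastio stream restore` stdout as newline-delimited JSON and replays each record into Kafka. A condensed sketch of that loop follows, using the same kafka-python client the script imports; only `topic=data['topic']` is visible in the diff, so `partition`, `key`, and `value` are assumed field names for illustration:

```python
import json

from kafka import KafkaProducer  # kafka-python, as in the script itself


def replay(restore_stdout: bytes, bootstrap_servers: list) -> int:
    """Replay JSON-encoded Kafka messages from restore output (sketch)."""
    prod = KafkaProducer(bootstrap_servers=bootstrap_servers)
    msg_count = 0
    # One JSON document per line, mirroring the renamed `lines` generator.
    for raw in restore_stdout.splitlines():
        data = json.loads(raw.decode())
        key = data.get("key")  # assumed field, as noted above
        prod.send(
            topic=data["topic"],              # visible in the diff
            partition=data.get("partition"),  # assumed field
            key=key.encode() if key else None,
            value=data["value"].encode(),     # assumed field
        )
        msg_count += 1
    prod.flush()  # make sure everything is on the wire before closing
    prod.close()
    return msg_count
```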
diff --git a/typos.toml b/typos.toml
new file mode 100644
index 0000000..8fd15e1
--- /dev/null
+++ b/typos.toml
@@ -0,0 +1,8 @@
+# See documentation for this config file at https://github.com/crate-ci/typos/blob/master/docs/reference.md
+
+[default.extend-identifiers]
+# Abbreviation: Security information and event management
+SIEMs = "SIEMs"
+
+# Abbreviation: Recovery Time Objectives
+RTO = "RTO"

From ecdc4dfcc8e07e293542073647622eeb6c720cac Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Wed, 24 Apr 2024 10:06:59 +0000
Subject: [PATCH 2/4] Enable CI on PRs

---
 .github/workflows/main.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 6ebdb15..22e4154 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -3,6 +3,9 @@ on:
   push:
     branches:
       - master
+  pull_request:
+
+
 permissions:
   contents: read
   id-token: write
@@ -13,6 +16,7 @@ env:
   AWS_REGION: us-east-2
 jobs:
   changes:
+    if: github.event_name != 'pull_request'
     runs-on: ubuntu-latest
     outputs:
       ci: "${{ steps.filter.outputs.ci }}"
@@ -40,6 +44,7 @@ jobs:
       - uses: crate-ci/typos@v1.20.10
 
   upload-aws-backup-elastio-integration:
+    if: github.event_name != 'pull_request'
     runs-on: ubuntu-latest
     needs: changes
     if: >-
@@ -67,6 +72,7 @@ jobs:
         zip ${NAME}.zip lambda_handler.py
         aws s3 cp ${NAME}.zip s3://${S3_BUCKET}/${S3_KEY}/${NAME}.zip
   upload-elastio-s3-changelog:
+    if: github.event_name != 'pull_request'
     runs-on: ubuntu-latest
     needs: changes
     if: >-

From fff1b1b78a8de408cfe3a3750a6c4111d0536e3f Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Wed, 24 Apr 2024 10:08:12 +0000
Subject: [PATCH 3/4] Fix syntax

---
 .github/workflows/main.yml | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 22e4154..9d4f721 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -44,12 +44,13 @@ jobs:
       - uses: crate-ci/typos@v1.20.10
 
   upload-aws-backup-elastio-integration:
-    if: github.event_name != 'pull_request'
     runs-on: ubuntu-latest
     needs: changes
     if: >-
-      needs.changes.outputs.aws-backup-elastio-integration == 'true' ||
-      needs.changes.outputs.ci == 'true'
+      github.event_name != 'pull_request' && (
+        needs.changes.outputs.aws-backup-elastio-integration == 'true' ||
+        needs.changes.outputs.ci == 'true'
+      )
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -72,12 +73,13 @@ jobs:
         zip ${NAME}.zip lambda_handler.py
         aws s3 cp ${NAME}.zip s3://${S3_BUCKET}/${S3_KEY}/${NAME}.zip
   upload-elastio-s3-changelog:
-    if: github.event_name != 'pull_request'
     runs-on: ubuntu-latest
     needs: changes
     if: >-
-      needs.changes.outputs.elastio-s3-changelog == 'true' ||
-      needs.changes.outputs.ci == 'true'
+      github.event_name != 'pull_request' && (
+        needs.changes.outputs.elastio-s3-changelog == 'true' ||
+        needs.changes.outputs.ci == 'true'
+      )
     env:
       S3_BUCKET: elastio-prod-artifacts-us-east-2
     steps:

From 7a8c9be50142f0d5f3cdcfa40c324b6a5aa70654 Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Wed, 24 Apr 2024 10:08:57 +0000
Subject: [PATCH 4/4] Rename CI to remove assumption about master

---
 .github/workflows/{main.yml => ci.yml} | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename .github/workflows/{main.yml => ci.yml} (99%)

diff --git a/.github/workflows/main.yml b/.github/workflows/ci.yml
similarity index 99%
rename from .github/workflows/main.yml
rename to .github/workflows/ci.yml
index 9d4f721..c875f55 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/ci.yml
@@ -1,4 +1,4 @@
-name: master
+name: ci
 on:
   push:
     branches:
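One closing note on PATCH 3/4: merging the two `if:` keys was necessary because a YAML mapping cannot repeat a key, but the added parentheses matter just as much. In GitHub Actions expressions, as in most C-like languages, `&&` binds tighter than `||`, so without the grouping the `ci == 'true'` branch would re-enable the upload jobs on pull requests. A quick Python sanity check of the two shapes, with plain booleans standing in for the expression contexts:

```python
from itertools import product

# grouped   -- what PATCH 3/4 uses:   not_pr && (changed || ci)
# ungrouped -- the naive merge:       not_pr && changed || ci
# Python's `and` also binds tighter than `or`, so it models the same rule.
for is_pr, changed, ci in product([False, True], repeat=3):
    grouped = (not is_pr) and (changed or ci)
    ungrouped = (not is_pr) and changed or ci
    if grouped != ungrouped:
        print(f"is_pr={is_pr} changed={changed} ci={ci}: "
              f"grouped={grouped}, ungrouped={ungrouped}")
# The forms disagree exactly when is_pr and ci are both true: the
# ungrouped version would still run the job on a pull request.
```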