diff --git a/.gitignore b/.gitignore index f518239c..85401bc7 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,8 @@ **/bin/Debug/ **/obj/ /.vscode/ +.idea /auth/**/*.exe +credentials-fetcher.sln +cmake-build-debug/ + diff --git a/.idea/.gitignore b/.idea/.gitignore deleted file mode 100644 index df633058..00000000 --- a/.idea/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ - # Default ignored files - /shelf/ - /workspace.xml - # Editor-based HTTP Client requests - /httpRequests/ - # Datasource local storage ignored files - /dataSources/ - /dataSources.local.xml \ No newline at end of file diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml deleted file mode 100644 index 75db43f2..00000000 --- a/.idea/codeStyles/Project.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml deleted file mode 100644 index 20e311cf..00000000 --- a/.idea/codeStyles/codeStyleConfig.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/credentials-fetcher.iml b/.idea/credentials-fetcher.iml deleted file mode 100644 index f08604bb..00000000 --- a/.idea/credentials-fetcher.iml +++ /dev/null @@ -1,2 +0,0 @@ - - \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml deleted file mode 100644 index 121b6caa..00000000 --- a/.idea/misc.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index c58df96a..00000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 54e4b961..00000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/CMakeLists.txt b/CMakeLists.txt index 1171c241..8d11a451 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -194,9 +194,8 @@ endif() add_custom_command( TARGET 
credentials-fetcherd PRE_LINK - COMMAND bash -c "CURR_DIR=$PWD && echo $CURR_DIR && cd ${CMAKE_CURRENT_SOURCE_DIR}/auth/kerberos/src/utf16_decode && ./build-using-csc.sh Program.cs && cp Program.exe $CURR_DIR/credentials_fetcher_utf16_private.exe && cp Program.runtimeconfig.json $CURR_DIR/credentials_fetcher_utf16_private.runtimeconfig.json" + COMMAND bash -c "CURR_DIR=$PWD && echo $CURR_DIR && cd ${CMAKE_CURRENT_SOURCE_DIR}/auth/kerberos/src/utf16_decode && ./build-using-native-aot.sh && cp bin/Release/net8.0/linux-x64/publish/utf16_decode $CURR_DIR/credentials_fetcher_utf16_private" VERBATIM) - target_include_directories(credentials-fetcherd PUBLIC common) if(${Protobuf_VERSION} VERSION_GREATER_EQUAL "3.21.0.0") @@ -222,12 +221,11 @@ install(FILES ${CMAKE_BINARY_DIR}/credentials-fetcherd install(FILES ${CMAKE_SOURCE_DIR}/scripts/systemd/credentials-fetcher.service DESTINATION "/usr/lib/systemd/system/") -install(FILES ${CMAKE_BINARY_DIR}/credentials_fetcher_utf16_private.exe - DESTINATION "/usr/sbin/" - PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ) -install(FILES ${CMAKE_BINARY_DIR}/credentials_fetcher_utf16_private.runtimeconfig.json + +install(FILES ${CMAKE_BINARY_DIR}/credentials_fetcher_utf16_private DESTINATION "/usr/sbin/" PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ) + install(FILES ${CMAKE_BINARY_DIR}/krb5.conf DESTINATION "/usr/sbin/" PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ) @@ -238,3 +236,20 @@ add_test(NAME check_help COMMAND ${CMAKE_BINARY_DIR}/credentials-fetcherd "--hel add_test(NAME run_self_test COMMAND ${CMAKE_BINARY_DIR}/credentials-fetcherd "--self_test") set_tests_properties(check_help PROPERTIES WILL_FAIL TRUE) set_tests_properties(run_self_test PROPERTIES WILL_FAIL FALSE) + + +find_package(GTest CONFIG REQUIRED) +set(TEST_FILES test/tester.cpp) +add_executable(credentials-fetcher-test ${metadata} ${TEST_FILES} common) +target_include_directories(credentials-fetcher-test + PUBLIC + common + ${GLIB_INCLUDE_DIR} + 
${GLIB_CONFIG_DIR} + ${CMAKE_CURRENT_BINARY_DIR}) +target_link_libraries(credentials-fetcher-test gtest gtest_main + systemd + glib-2.0 + jsoncpp ssl crypto + krb5 kadm5srv_mit kdb5 gssrpc gssapi_krb5 gssrpc k5crypto + com_err krb5support resolv) diff --git a/README.md b/README.md index 1b05dd87..e0c90bc9 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,59 @@ # Credentials Fetcher +NOTE: This branch is un-released, additional tests are not complete. +-------------------------------------------------------------------- + `credentials-fetcher` is a Linux daemon that retrieves gMSA credentials from Active Directory over LDAP. It creates and refreshes kerberos tickets from gMSA credentials. Kerberos tickets can be used by containers to run apps/services that authenticate using Active Directory. This daemon works in a similar way as ccg.exe and the gMSA plugin in Windows as described in - https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/manage-serviceaccounts#gmsa-architecture-and-improvements ### How to install and run -On [Fedora 36](_https://alt.fedoraproject.org/cloud/_) and similar distributions, the binary RPM can be installed as +- To use the custom credentials-fetcher rpm in ECS domainless mode, modify the user data script as follows +https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html#linux-gmsa-setup + + ``` + #!/bin/bash + + # prerequisites + dnf install -y dotnet + dnf install -y realmd + dnf install -y oddjob + dnf install -y oddjob-mkhomedir + dnf install -y sssd + dnf install -y adcli + dnf install -y krb5-workstation + dnf install -y samba-common-tools + + # install custom credentials-fetcher rpm from branch - https://github.com/aws/credentials-fetcher/tree/fixes_for_DNS_and_distinguishedName gMSA credentials management for containers + curl -L -O https://github.com/aws/credentials-fetcher/raw/refs/heads/fixes_for_DNS_and_distinguishedName/rpm/credentials-fetcher-..-0.amzn2023.x86_64.rpm + dnf 
install -y ./credentials-fetcher-..-0.amzn2023.x86_64.rpm + + # start credentials-fetcher + systemctl enable credentials-fetcher + systemctl start credentials-fetcher + + echo "ECS_GMSA_SUPPORTED=true" >> /etc/ecs/ecs.config + echo ECS_CLUSTER=MyCluster >> /etc/ecs/ecs.config + ``` + + + Add an additional optional field in the secret in AWS Secrets Manager along with the standard user's username, password, and the domain. Enter the service account's Distinguished Name (DN) into JSON key-value pairs called `distinguishedName` + + ``` + {"username":"username","password":"passw0rd", "domainName":"example.com", "distinguishedName":"CN=WebApp01,OU=DemoOU,OU=Users,OU=example,DC=example,DC=com"} + ``` + +- On [Fedora 41](_https://alt.fedoraproject.org/cloud/_) and similar distributions, the binary RPM can be installed as `sudo dnf install credentials-fetcher`. You can also use yum if dnf is not present. The daemon can be started using `sudo systemctl start credentials-fetcher`. -On Enterprise Linux 9 ( RHEL | CentOS | AlmaLinux ), the binary can be installed from EPEL. To add EPEL, see the [EPEL Quickstart](_https://docs.fedoraproject.org/en-US/epel/#_quickstart_). +- On Enterprise Linux 9 ( RHEL | CentOS | AlmaLinux ), the binary can be installed from EPEL. To add EPEL, see the [EPEL Quickstart](_https://docs.fedoraproject.org/en-US/epel/#_quickstart_). Once EPEL is enabled, install credentials-fetcher with `sudo dnf install credentials-fetcher`. -For other linux distributions, the daemon binary needs to be built from source code. +- For other linux distributions, the daemon binary needs to be built from source code. ## Development @@ -48,7 +86,106 @@ To start a local dev environment from scratch: * ./credentials-fetcher to start the program in non-daemon mode. ``` -#### Testing +## Logging + +Logs about request/response to the daemon and any failures. 
+ +``` +journalctl -u credentials-fetcher +``` + +### Default environment variables + +| Environment Key | Examples values | Description | +| :-------------------------- | ---------------------------------- | :------------------------------------------------------------------------------------------- | +| `CF_KRB_DIR` | '/var/credentials-fetcher/krbdir' | _(Default)_ Dir path for storing the kerberos tickets | +| `CF_UNIX_DOMAIN_SOCKET_DIR` | '/var/credentials-fetcher/socket' | _(Default)_ Dir path for the domain socker for gRPC communication 'credentials_fetcher.sock' | +| `CF_LOGGING_DIR` | '/var/credentials-fetcher/logging' | _(Default)_ Dir Path for log | +| `CF_TEST_DOMAIN_NAME` | 'contoso.com' | Test domain name | +| `CF_TEST_GMSA_ACCOUNT` | 'webapp01' | Test gMSA account name | + +### Runtime environment variables + +| Environment Variable | Examples values | Description | +| :------------------- | ----------------------------------------------------- | :------------------------------------------------------------------------- | +| `CF_CRED_SPEC_FILE` | '/var/credentials-fetcher/my-credspec.json' | Path to a credential spec file used as input. (Lease id default: credspec) | +| | '/var/credentials-fetcher/my-credspec.json:myLeaseId' | An optional lease id specified after a colon | +| `CF_GMSA_OU` | 'CN=Managed Service Accounts' | Component of GMSA distinguished name (see docs/cf_gmsa_ou.md) | + + +## Testing + +### Test using Personal CDK Stack + +Use the AWS CDK to create +- Active Directory Server +- Windows EC2 instance to manage AD +- EC2 Linux Containers on Amazon ECS +- gMSA Account(s) in Active Directory +The CDK will create all necessary infrastructure and install the necessary dependencies to run credentials-fetcher on non-domain-joined ECS hosts. Detailed steps to deploy and test using the CDK stack are present [here](https://github.com/aws/credentials-fetcher/blob/mainline/cdk/cdk-domainless-mode/README.md). 
+ +### Test APIs using Integration Test Script + +`/api/tests/gmsa_api_integration_test.cpp` contains integration tests for the of the gMSA APIs. + +#### Prerequisites +Follow the instructions in the [Domainless Mode README](cdk/cdk-domainless-mode/README.md) to set up the required infrastructure for testing gMSA on Linux containers. + +#### Setup +Set AWS environment variables +``` +export AWS_ACCESS_KEY_ID=XXXX +export AWS_SECRET_ACCESS_KEY=XXXX +export AWS_SESSION_TOKEN=XXXX +export AWS_REGION=XXXX +``` + +Set Amazon S3 ARN containing the credential spec file. +``` +export CF_TEST_CREDSPEC_ARN=XXX +``` + +Set standard username, password and domain used for testing +``` +export CF_TEST_STANDARD_USERNAME=XXXX +export CF_TEST_STANDARD_USER_PASSWORD=XXXX +export CF_TEST_DOMAIN=XXXX +``` + +#### Build && Test +Follow the instructions from [Standalone mode](#standalone-mode) sections to build the code, generate binaries and start the server. Once the server has started, run integration tests by running + +``` +cd credentials-fetcher/build/ +sudo -E api/tests/gmsa_api_integration_test +``` + +#### Sample output +``` +> sudo api/tests/gmsa_api_integration_test +[==========] Running 6 tests from 1 test suite. +[----------] Global test environment set-up. 
+[----------] 6 tests from GmsaIntegrationTest +[ RUN ] GmsaIntegrationTest.HealthCheck_Test +[ OK ] GmsaIntegrationTest.HealthCheck_Test (4 ms) +[ RUN ] GmsaIntegrationTest.A_AddNonDomainJoinedKerberosLeaseMethod_Test +[ OK ] GmsaIntegrationTest.A_AddNonDomainJoinedKerberosLeaseMethod_Test (1028 ms) +[ RUN ] GmsaIntegrationTest.B_RenewNonDomainJoinedKerberosLeaseMethod_Test +[ OK ] GmsaIntegrationTest.B_RenewNonDomainJoinedKerberosLeaseMethod_Test (553 ms) +[ RUN ] GmsaIntegrationTest.C_DeleteKerberosLeaseMethod_Test +[ OK ] GmsaIntegrationTest.C_DeleteKerberosLeaseMethod_Test (7 ms) +[ RUN ] GmsaIntegrationTest.A_AddKerberosArnLeaseMethod_Test +[ OK ] GmsaIntegrationTest.A_AddKerberosArnLeaseMethod_Test (768 ms) +[ RUN ] GmsaIntegrationTest.B_RenewKerberosArnLeaseMethod_Test +[ OK ] GmsaIntegrationTest.B_RenewKerberosArnLeaseMethod_Test (691 ms) +[----------] 6 tests from GmsaIntegrationTest (3054 ms total) + +[----------] Global test environment tear-down +[==========] 6 tests from 1 test suite ran. (3054 ms total) +[ PASSED ] 6 tests. +``` + +### Testing Tips without using CDK stack or Test Scripts To communicate with the daemon over gRPC, install grpc-cli. For example `sudo yum install grpc-cli` @@ -88,33 +225,6 @@ grpc_cli call unix:/var/credentials-fetcher/socket/credentials_fetcher.sock Dele ``` -### Logging - -Logs about request/response to the daemon and any failures. 
- -``` -journalctl -u credentials-fetcher -``` - -#### Default environment variables - -| Environment Key | Examples values | Description | -| :-------------------------- | ---------------------------------- | :------------------------------------------------------------------------------------------- | -| `CF_KRB_DIR` | '/var/credentials-fetcher/krbdir' | _(Default)_ Dir path for storing the kerberos tickets | -| `CF_UNIX_DOMAIN_SOCKET_DIR` | '/var/credentials-fetcher/socket' | _(Default)_ Dir path for the domain socker for gRPC communication 'credentials_fetcher.sock' | -| `CF_LOGGING_DIR` | '/var/credentials-fetcher/logging' | _(Default)_ Dir Path for log | -| `CF_TEST_DOMAIN_NAME` | 'contoso.com' | Test domain name | -| `CF_TEST_GMSA_ACCOUNT` | 'webapp01' | Test gMSA account name | - -#### Runtime environment variables - -| Environment Variable | Examples values | Description | -| :------------------- | ----------------------------------------------------- | :------------------------------------------------------------------------- | -| `CF_CRED_SPEC_FILE` | '/var/credentials-fetcher/my-credspec.json' | Path to a credential spec file used as input. 
(Lease id default: credspec) | -| | '/var/credentials-fetcher/my-credspec.json:myLeaseId' | An optional lease id specified after a colon | -| `CF_GMSA_OU` | 'CN=Managed Service Accounts' | Component of GMSA distinguished name (see docs/cf_gmsa_ou.md) | - - ### Examples #### Testing with Active Directory domain-joined mode (opensource) diff --git a/api/src/gmsa_service.cpp b/api/src/gmsa_service.cpp index 76a6f903..dd5aee51 100644 --- a/api/src/gmsa_service.cpp +++ b/api/src/gmsa_service.cpp @@ -603,10 +603,9 @@ class CredentialsFetcherImpl final std::pair status; if ( username.empty() || password.empty() ) { - cf_logger.logger( LOG_ERR, - "Invalid credentials for " - "domainless user ", - username.c_str() ); + std::string log_message = + "Invalid credentials for domainless user " + username; + cf_logger.logger( LOG_ERR, log_message.c_str() ); err_msg = "ERROR: Invalid credentials for domainless user"; std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; break; @@ -617,7 +616,10 @@ class CredentialsFetcherImpl final { err_msg = "ERROR :" + std::to_string( status.first ) + ": Cannot retrieve domainless user kerberos tickets"; - cf_logger.logger( LOG_ERR, err_msg.c_str(), status ); + std::string log_message = err_msg + + " Status: " + std::to_string( status.first ) + + " " + status.second; + cf_logger.logger( LOG_ERR, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; break; } @@ -644,13 +646,14 @@ class CredentialsFetcherImpl final ": Cannot get gMSA krb ticket"; std::cerr << Util::getCurrentTime() << '\t' << err_msg.c_str() << std::endl; - cf_logger.logger( LOG_ERR, err_msg.c_str(), status.first ); + cf_logger.logger( LOG_ERR, err_msg.c_str() ); break; } else { - cf_logger.logger( LOG_INFO, "gMSA ticket is at %s", - gmsa_ticket_result.second.c_str() ); + std::string log_message = + "gMSA ticket is at " + gmsa_ticket_result.second; + cf_logger.logger( LOG_INFO, log_message.c_str() ); std::cerr << 
Util::getCurrentTime() << '\t' << "INFO: gMSA ticket is " "created" @@ -1140,6 +1143,7 @@ class CredentialsFetcherImpl final std::unordered_set krb_ticket_dirs; std::string err_msg; + std::string log_message; create_krb_reply_.set_lease_id( lease_id ); for ( int i = 0; i < create_krb_request_.credspec_contents_size(); i++ ) { @@ -1197,8 +1201,9 @@ class CredentialsFetcherImpl final } if ( status.first < 0 ) { - cf_logger.logger( LOG_ERR, "Error %d: Cannot get machine krb ticket", - status ); + log_message = "Error: " + std::to_string( status.first ) + + " Cannot get machine krb ticket " + status.second; + cf_logger.logger( LOG_ERR, log_message.c_str() ); err_msg = "ERROR: cannot get machine krb ticket"; std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; break; @@ -1207,10 +1212,9 @@ class CredentialsFetcherImpl final std::string krb_file_path = krb_ticket->krb_file_path; if ( std::filesystem::exists( krb_file_path ) ) { - cf_logger.logger( LOG_INFO, - "Directory already exists: " - "%s", - krb_file_path.c_str() ); + log_message = "Directory already exists: " + krb_file_path; + + cf_logger.logger( LOG_INFO, log_message.c_str() ); break; } std::filesystem::create_directories( krb_file_path ); @@ -1232,14 +1236,15 @@ class CredentialsFetcherImpl final { err_msg = "ERROR: Cannot get gMSA krb ticket"; std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; - cf_logger.logger( LOG_ERR, "ERROR: Cannot get gMSA krb ticket", - status ); + log_message = "ERROR: Cannot get gMSA krb ticket " + + std::to_string( status.first ) + " " + status.second; + cf_logger.logger( LOG_ERR, log_message.c_str() ); break; } else { - cf_logger.logger( LOG_INFO, "gMSA ticket is at %s", - gmsa_ticket_result.second.c_str() ); + log_message = "gMSA ticket is at " + gmsa_ticket_result.second; + cf_logger.logger( LOG_INFO, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << "INFO: gMSA ticket is at " << gmsa_ticket_result.second << std::endl; @@ 
-1422,6 +1427,7 @@ class CredentialsFetcherImpl final std::string domain = create_domainless_krb_request_.domain(); std::string err_msg; + std::string log_message; if ( isValidDomain( domain ) && !contains_invalid_characters_in_ad_account_name( username ) ) { @@ -1502,10 +1508,8 @@ class CredentialsFetcherImpl final std::pair status; if ( username.empty() || password.empty() ) { - cf_logger.logger( LOG_ERR, - "Invalid credentials for " - "domainless user ", - username.c_str() ); + log_message = "Invalid credentials for domainless user " + username; + cf_logger.logger( LOG_ERR, log_message.c_str() ); err_msg = "ERROR: Invalid credentials for domainless user"; std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; break; @@ -1516,7 +1520,7 @@ class CredentialsFetcherImpl final { err_msg = "ERROR: " + std::to_string( status.first ) + ": cannot retrieve domainless user kerberos tickets"; - cf_logger.logger( LOG_ERR, err_msg.c_str(), status.first ); + cf_logger.logger( LOG_ERR, err_msg.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; break; } @@ -1524,10 +1528,8 @@ class CredentialsFetcherImpl final std::string krb_file_path = krb_ticket->krb_file_path; if ( std::filesystem::exists( krb_file_path ) ) { - cf_logger.logger( LOG_INFO, - "Directory already exists: " - "%s", - krb_file_path.c_str() ); + log_message = "Directory already exists: " + krb_file_path; + cf_logger.logger( LOG_INFO, log_message.c_str() ); break; } std::filesystem::create_directories( krb_file_path ); @@ -1564,13 +1566,13 @@ class CredentialsFetcherImpl final err_msg = "ERROR: Cannot get gMSA krb ticket " + gmsa_ticket_result.second; std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; - cf_logger.logger( LOG_ERR, err_msg.c_str(), gmsa_ticket_result.second ); + cf_logger.logger( LOG_ERR, err_msg.c_str() ); break; } else { - cf_logger.logger( LOG_INFO, "gMSA ticket is at %s", - gmsa_ticket_result.second.c_str() ); + log_message = "gMSA 
ticket is at " + gmsa_ticket_result.second; + cf_logger.logger( LOG_INFO, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << "INFO: gMSA ticket is created" << std::endl; } @@ -2383,15 +2385,16 @@ int ProcessCredSpecFile( std::string krb_files_dir, std::string credspec_filepat { std::string err_msg; std::string credspec_contents; + std::string log_message = "Generating lease id " + cred_file_lease_id; - cf_logger.logger( LOG_INFO, "Generating lease id %s", cred_file_lease_id.c_str() ); + cf_logger.logger( LOG_INFO, log_message.c_str() ); if ( !std::filesystem::exists( credspec_filepath ) ) { std::cerr << Util::getCurrentTime() << '\t' << "The credential spec file " << credspec_filepath << " was not found!" << std::endl; - cf_logger.logger( LOG_ERR, "The credential spec file %s was not found!", - credspec_filepath.c_str() ); + log_message = "The credential spec file " + credspec_filepath + " was not found!"; + cf_logger.logger( LOG_ERR, log_message.c_str() ); return EXIT_FAILURE; } @@ -2405,8 +2408,8 @@ int ProcessCredSpecFile( std::string krb_files_dir, std::string credspec_filepat } else { - cf_logger.logger( LOG_ERR, "Unable to open credential spec file: %s", - credspec_filepath.c_str() ); + log_message = "Unable to open credential spec file: " + credspec_filepath; + cf_logger.logger( LOG_ERR, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << "Unable to open credential spec file: " << credspec_filepath << std::endl; @@ -2438,7 +2441,9 @@ int ProcessCredSpecFile( std::string krb_files_dir, std::string credspec_filepat status = generate_krb_ticket_from_machine_keytab( krb_ticket_info->domain_name, cf_logger ); if ( status.first < 0 ) { - cf_logger.logger( LOG_ERR, "Error %d: Cannot get machine krb ticket", status ); + log_message = "Error: " + std::to_string( status.first ) + " " + status.second + + " Cannot get machine krb ticket"; + cf_logger.logger( LOG_ERR, log_message.c_str() ); delete krb_ticket_info; return EXIT_FAILURE; 
@@ -2447,8 +2452,8 @@ int ProcessCredSpecFile( std::string krb_files_dir, std::string credspec_filepat std::string krb_file_path = krb_ticket_info->krb_file_path; if ( std::filesystem::exists( krb_file_path ) ) { - cf_logger.logger( LOG_INFO, "Deleting existing credential file directory %s", - +krb_file_path.c_str() ); + log_message = "Deleting existing credential file directory " + krb_file_path; + cf_logger.logger( LOG_INFO, log_message.c_str() ); std::filesystem::remove_all( krb_file_path ); } @@ -2470,13 +2475,15 @@ int ProcessCredSpecFile( std::string krb_files_dir, std::string credspec_filepat { err_msg = "ERROR: Cannot get gMSA krb ticket"; std::cerr << Util::getCurrentTime() << '\t' << err_msg << std::endl; - cf_logger.logger( LOG_ERR, "ERROR: Cannot get gMSA krb ticket", status ); + log_message = "ERROR: Cannot get gMSA krb ticket " + std::to_string( status.first ) + + " " + status.second; + cf_logger.logger( LOG_ERR, log_message.c_str() ); } else { chmod( krb_ccname_str.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH ); - - cf_logger.logger( LOG_INFO, "gMSA ticket is at %s", gmsa_ticket_result.second.c_str() ); + log_message = "gMSA ticket is at " + gmsa_ticket_result.second; + cf_logger.logger( LOG_INFO, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << "INFO: gMSA ticket is created" << std::endl; } @@ -2491,7 +2498,7 @@ int ProcessCredSpecFile( std::string krb_files_dir, std::string credspec_filepat std::filesystem::remove_all( krb_ticket_info->krb_file_path ); std::cerr << err_msg << std::endl; - cf_logger.logger( LOG_ERR, "%s", err_msg.c_str() ); + cf_logger.logger( LOG_ERR, err_msg.c_str() ); delete krb_ticket_info; return EXIT_FAILURE; @@ -2688,27 +2695,13 @@ std::string retrieve_credspec_from_s3( std::string s3_arn, std::string region, std::cerr << objectName; return dummy_credspec; } - - // regex for callerId - std::regex callerIdRegex( "^\\d{12}$" ); - std::string callerId = get_caller_id( region, creds ); - if ( 
callerId.empty() && !std::regex_match( callerId, callerIdRegex ) ) - { - std::cerr << Util::getCurrentTime() << '\t' - << "ERROR: Unable to get caller information" << std::endl; - return std::string( "" ); - } - - Aws::S3::S3Client s3Client( - credentials, - Aws::MakeShared( Aws::S3::S3Client::ALLOCATION_TAG ), - clientConfig ); + Aws::S3::S3Client s3Client (credentials,Aws::MakeShared + (Aws::S3::S3Client::ALLOCATION_TAG), clientConfig); Aws::S3::Model::GetObjectRequest request; - request.SetExpectedBucketOwner( callerId ); - request.SetBucket( s3Bucket ); - request.SetKey( objectName ); - Aws::S3::Model::GetObjectOutcome outcome = s3Client.GetObject( request ); - + request.SetBucket(s3Bucket); + request.SetKey(objectName); + Aws::S3::Model::GetObjectOutcome outcome = + s3Client.GetObject(request); if ( !outcome.IsSuccess() ) { const Aws::S3::S3Error& err = outcome.GetError(); @@ -2817,5 +2810,4 @@ retrieve_credspec_from_secrets_manager( std::string sm_arn, std::string region, } return { "", "", "", "" }; } - #endif diff --git a/api/tests/CMakeLists.txt b/api/tests/CMakeLists.txt index d406a19a..d7af8c89 100644 --- a/api/tests/CMakeLists.txt +++ b/api/tests/CMakeLists.txt @@ -20,10 +20,20 @@ add_executable(gmsa_test_client "gmsa_test_client.cpp") target_link_libraries(gmsa_test_client cf_gmsa_service_private ${_PROTOBUF_LIBPROTOBUF}) +add_executable(gmsa_api_integration_test "gmsa_api_integration_test.cpp") +target_link_libraries(gmsa_api_integration_test + cf_gmsa_service_private + ${_PROTOBUF_LIBPROTOBUF} + jsoncpp + gtest + gtest_main) + cmake_policy(SET CMP0083 NEW) include(CheckPIESupported) check_pie_supported() if (CMAKE_C_LINK_PIE_SUPPORTED) set_property(TARGET gmsa_test_client PROPERTY POSITION_INDEPENDENT_CODE TRUE) + set_property(TARGET gmsa_api_integration_test + PROPERTY POSITION_INDEPENDENT_CODE TRUE) endif () diff --git a/api/tests/gmsa_api_integration_test.cpp b/api/tests/gmsa_api_integration_test.cpp new file mode 100644 index 00000000..574a3f7f 
--- /dev/null +++ b/api/tests/gmsa_api_integration_test.cpp @@ -0,0 +1,318 @@ +#include "daemon.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define unix_socket_address "unix:/var/credentials-fetcher/socket/credentials_fetcher.sock" + +#define CF_TEST_STANDARD_USERNAME "CF_TEST_STANDARD_USERNAME" +#define CF_TEST_STANDARD_USER_PASSWORD "CF_TEST_STANDARD_USER_PASSWORD" +#define CF_TEST_DOMAIN "CF_TEST_DOMAIN" +#define CF_TEST_CREDSPEC_ARN "CF_TEST_CREDSPEC_ARN" + +#define AWS_ACCESS_KEY_ID "AWS_ACCESS_KEY_ID" +#define AWS_SECRET_ACCESS_KEY "AWS_SECRET_ACCESS_KEY" +#define AWS_SESSION_TOKEN "AWS_SESSION_TOKEN" +#define AWS_REGION "AWS_REGION" + +static std::string get_environment_var( const char* varname ) +{ + const char* value = std::getenv( varname ); + if ( !value ) + { + throw std::runtime_error( std::string( "Environment variable not set: " ) + varname ); + } + return std::string( value ); +} + +class GmsaIntegrationTest : public ::testing::Test +{ + public: + static std::string cred_spec_contents; + + protected: + std::unique_ptr _stub; + static std::string + arn_lease_id_; // Static member to share between AddKerberosArnLeaseMethod_Test and + // RenewKerberosArnLeaseMethod_Test + static std::string + non_domain_joined_lease_id_; // Static member to share between + // AddNonDomainJoinedKerberosLeaseMethod_Test and + // RenewNonDomainJoinedKerberosLeaseMethod_Test + + void SetUp() override + { + auto channel = + grpc::CreateChannel( unix_socket_address, grpc::InsecureChannelCredentials() ); + _stub = credentialsfetcher::CredentialsFetcherService::NewStub( channel ); + } +}; + +std::string GmsaIntegrationTest::arn_lease_id_; +std::string GmsaIntegrationTest::non_domain_joined_lease_id_; +std::string GmsaIntegrationTest::cred_spec_contents; + +TEST_F( GmsaIntegrationTest, HealthCheck_Test ) +{ + // Prepare request + credentialsfetcher::HealthCheckRequest request; + request.set_service( "cfservice" ); 
+ + // Call the API + credentialsfetcher::HealthCheckResponse response; + grpc::ClientContext context; + grpc::Status status = _stub->HealthCheck( &context, request, &response ); + + // Verify response + ASSERT_TRUE( status.ok() ) << status.error_message(); + ASSERT_EQ( response.status(), "OK" ) << "Health check should return OK"; +} + +TEST_F( GmsaIntegrationTest, A_AddNonDomainJoinedKerberosLeaseMethod_Test ) +{ + // Prepare request + credentialsfetcher::CreateNonDomainJoinedKerberosLeaseRequest request; + + // Set test credentials + request.set_username( get_environment_var( CF_TEST_STANDARD_USERNAME ) ); + request.set_password( get_environment_var( CF_TEST_STANDARD_USER_PASSWORD ) ); + request.set_domain( get_environment_var( CF_TEST_DOMAIN ) ); + + // Add test credspec content + request.add_credspec_contents( cred_spec_contents ); + + credentialsfetcher::CreateNonDomainJoinedKerberosLeaseResponse response; + grpc::ClientContext context; + + // Call the API + grpc::Status status = _stub->AddNonDomainJoinedKerberosLease( &context, request, &response ); + non_domain_joined_lease_id_ = response.lease_id(); + + // Verify response + ASSERT_TRUE( status.ok() ) << status.error_message(); + ASSERT_FALSE( response.lease_id().empty() ) << "Lease ID should not be empty"; + ASSERT_GT( response.created_kerberos_file_paths_size(), 0 ) + << "Should have created at least one kerberos file"; + + // Verify file paths exist + for ( int i = 0; i < response.created_kerberos_file_paths_size(); i++ ) + { + const std::string& file_path = response.created_kerberos_file_paths( i ) + "/krb5cc"; + ASSERT_TRUE( std::filesystem::exists( file_path ) ) + << "Kerberos file " << file_path << " should exist"; + } +} + +TEST_F( GmsaIntegrationTest, B_RenewNonDomainJoinedKerberosLeaseMethod_Test ) +{ + if ( non_domain_joined_lease_id_.empty() ) + { + GTEST_SKIP() << "Skipping test because AddNonDomainJoinedKerberosLease_Test failed"; + } + + // Prepare request + 
credentialsfetcher::RenewNonDomainJoinedKerberosLeaseRequest request; + + // Set test credentials + request.set_username( get_environment_var( CF_TEST_STANDARD_USERNAME ) ); + request.set_password( get_environment_var( CF_TEST_STANDARD_USER_PASSWORD ) ); + request.set_domain( get_environment_var( CF_TEST_DOMAIN ) ); + + credentialsfetcher::RenewNonDomainJoinedKerberosLeaseResponse response; + grpc::ClientContext context; + + // Call the API + grpc::Status status = _stub->RenewNonDomainJoinedKerberosLease( &context, request, &response ); + + // Verify response + ASSERT_TRUE( status.ok() ) << status.error_message(); +} + +TEST_F( GmsaIntegrationTest, C_DeleteKerberosLeaseMethod_Test ) +{ + if ( non_domain_joined_lease_id_.empty() ) + { + GTEST_SKIP() << "Skipping test because AddNonDomainJoinedKerberosLease_Test failed"; + } + + // Prepare request + credentialsfetcher::DeleteKerberosLeaseRequest request; + + // Set test credentials + request.set_lease_id( non_domain_joined_lease_id_ ); + + credentialsfetcher::DeleteKerberosLeaseResponse response; + grpc::ClientContext context; + + // Call the API + grpc::Status status = _stub->DeleteKerberosLease( &context, request, &response ); + + // Verify response + ASSERT_TRUE( status.ok() ) << status.error_message(); + ASSERT_FALSE( response.lease_id().empty() ) << "Lease ID should not be empty"; + ASSERT_GT( response.deleted_kerberos_file_paths_size(), 0 ) + << "Should have deleted at least one kerberos file"; + + // Verify file paths doesn't exist + for ( int i = 0; i < response.deleted_kerberos_file_paths_size(); i++ ) + { + const std::string& file_path = response.deleted_kerberos_file_paths( i ); + ASSERT_TRUE( !std::filesystem::exists( file_path ) ) + << "Kerberos file " << file_path << " shouldn't exist"; + } +} + +TEST_F( GmsaIntegrationTest, A_AddKerberosArnLeaseMethod_Test ) +{ + // Prepare request + credentialsfetcher::KerberosArnLeaseRequest request; + + std::string arn = get_environment_var( CF_TEST_CREDSPEC_ARN ); 
+ arn += "#123/WebApp01"; + request.add_credspec_arns( arn ); + request.set_access_key_id( get_environment_var( AWS_ACCESS_KEY_ID ) ); + request.set_secret_access_key( get_environment_var( AWS_SECRET_ACCESS_KEY ) ); + request.set_session_token( get_environment_var( AWS_SESSION_TOKEN ) ); + request.set_region( get_environment_var( AWS_REGION ) ); + + credentialsfetcher::CreateKerberosArnLeaseResponse response; + grpc::ClientContext context; + + // Call the API + grpc::Status status = _stub->AddKerberosArnLease( &context, request, &response ); + + arn_lease_id_ = response.lease_id(); + + // Verify response + ASSERT_TRUE( status.ok() ) << status.error_message(); + ASSERT_FALSE( response.lease_id().empty() ) << "Lease ID should not be empty"; + ASSERT_GT( response.krb_ticket_response_map_size(), 0 ) + << "Should have created at least one kerberos file"; + + // Verify file paths exist + for ( int i = 0; i < response.krb_ticket_response_map_size(); i++ ) + { + const std::string& file_path = + response.krb_ticket_response_map( i ).created_kerberos_file_paths() + "/krb5cc"; + ASSERT_TRUE( std::filesystem::exists( file_path ) ) + << "Kerberos file " << file_path << " should exist"; + } +} + +TEST_F( GmsaIntegrationTest, B_RenewKerberosArnLeaseMethod_Test ) +{ + if ( arn_lease_id_.empty() ) + { + GTEST_SKIP() << "Skipping test because AddKerberosArnLeaseMethod_Test failed"; + } + + // Prepare request + credentialsfetcher::RenewKerberosArnLeaseRequest request; + + request.set_access_key_id( get_environment_var( AWS_ACCESS_KEY_ID ) ); + request.set_secret_access_key( get_environment_var( AWS_SECRET_ACCESS_KEY ) ); + request.set_session_token( get_environment_var( AWS_SESSION_TOKEN ) ); + request.set_region( get_environment_var( AWS_REGION ) ); + + credentialsfetcher::RenewKerberosArnLeaseResponse response; + grpc::ClientContext context; + + // Call the API + grpc::Status status = _stub->RenewKerberosArnLease( &context, request, &response ); + + // Verify response + 
ASSERT_TRUE( status.ok() ) << status.error_message(); +} + +struct S3Location +{ + Aws::String bucket; + Aws::String key; +}; + +S3Location parseS3Arn( const Aws::String& arnString ) +{ + // Split ARN into components + Aws::Vector arnParts = Aws::Utils::StringUtils::Split( arnString, ':' ); + + // Get the bucket and key part (last component) + Aws::String resourcePart = arnParts[3]; + + // Split resource into bucket and key if there's a '/' + size_t delimiterPos = resourcePart.find( '/' ); + return S3Location{ resourcePart.substr( 0, delimiterPos ), + resourcePart.substr( delimiterPos + 1 ) }; +} + +void get_cred_spec_contents() +{ + auto location = parseS3Arn( get_environment_var( CF_TEST_CREDSPEC_ARN ) ); + Aws::String bucket = location.bucket; + Aws::String key = location.key; + + Aws::SDKOptions options; + Aws::InitAPI( options ); + { + // Create S3 client + Aws::Client::ClientConfiguration clientConfig; + clientConfig.region = get_environment_var( AWS_REGION ); + Aws::S3::S3Client s3_client( clientConfig ); + + // Configure S3 request + Aws::S3::Model::GetObjectRequest request; + request.SetBucket( bucket ); + request.SetKey( key ); + + // Get the object + auto outcome = s3_client.GetObject( request ); + + if ( outcome.IsSuccess() ) + { + // Read the JSON data + std::stringstream json_data; + json_data << outcome.GetResult().GetBody().rdbuf(); + json_data.seekg( 0 ); + // Parse JSON + Json::Value root; + Json::CharReaderBuilder reader; + Json::StreamWriterBuilder builder; + builder["indentation"] = ""; // for single line output + std::string errors; + if ( !Json::parseFromStream( reader, json_data, &root, &errors ) ) + { + throw std::runtime_error( "Failed to parse JSON: " + errors ); + } + if ( root.empty() ) + { + throw std::runtime_error( "Parsed JSON is empty" ); + } + // Assign values to test constants + std::string jsonString = Json::writeString( builder, root ); + if ( jsonString.empty() ) + { + throw std::runtime_error( "Failed to serialize JSON to 
string" ); + } + GmsaIntegrationTest::cred_spec_contents = jsonString; + } + else + { + throw std::runtime_error( "Error accessing S3: " + outcome.GetError().GetMessage() ); + } + } +} + +int main( int argc, char** argv ) +{ + get_cred_spec_contents(); + + testing::InitGoogleTest( &argc, argv ); + return RUN_ALL_TESTS(); +} diff --git a/auth/kerberos/src/krb.cpp b/auth/kerberos/src/krb.cpp index 076a165d..17bae4f1 100644 --- a/auth/kerberos/src/krb.cpp +++ b/auth/kerberos/src/krb.cpp @@ -12,7 +12,7 @@ const std::vector invalid_characters = { '&', '|', ';', ':', '$', '*', ' '>', '!', ' ', '\\', '.', ']', '[', '+', '\'', '`', '~', '}', '{', '"', ')', '(' }; -const std::string install_path_for_decode_exe = "/usr/sbin/credentials_fetcher_utf16_private.exe"; +const std::string install_path_for_decode_exe = "/usr/sbin/credentials_fetcher_utf16_private"; const std::string install_path_for_aws_cli = "/usr/bin/aws"; @@ -121,7 +121,9 @@ std::pair fetch_gmsa_password_and_create_krb_ticket( if ( domain_name.empty() || gmsa_account_name.empty() ) { - cf_logger.logger( LOG_ERR, "ERROR: %s:%d null args", __func__, __LINE__ ); + std::string log_message = + "ERROR: " + std::string( __func__ ) + ": " + std::to_string( __LINE__ ) + " null args"; + cf_logger.logger( LOG_ERR, log_message.c_str() ); std::string err_msg = std::string( "domain_name " + domain_name + " or gmsa_account_name " + gmsa_account_name + " is empty" ); return std::make_pair( -1, err_msg ); @@ -152,7 +154,7 @@ std::pair fetch_gmsa_password_and_create_krb_ticket( { distinguished_name = distinguished_name_result.second; } - std::string log_str = "Found dn = " + distinguished_name + "\n"; + std::string log_str = "Found dn = " + distinguished_name; cf_logger.logger( LOG_INFO, log_str.c_str() ); } @@ -169,7 +171,7 @@ std::pair fetch_gmsa_password_and_create_krb_ticket( { std::string log_str = ldap_search_result.second.substr( 0, pos ); log_str = "ldapsearch successful with FQDN = " + fqdn + ", cmd = " + log_str + "," + 
- "search_string = " + search_string + "\n"; + "search_string = " + search_string; std::cerr << log_str << std::endl; cf_logger.logger( LOG_INFO, log_str.c_str() ); } @@ -211,7 +213,7 @@ std::pair fetch_gmsa_password_and_create_krb_ticket( std::string default_principal = "'" + gmsa_account_name + "$'" + "@" + domain_name; /* Pipe password to the utf16 decoder and kinit */ - std::string kinit_cmd = std::string( "dotnet " ) + std::string( install_path_for_decode_exe ) + + std::string kinit_cmd = std::string( install_path_for_decode_exe ) + std::string( " | kinit " ) + std::string( " -c " ) + krb_cc_name + " -V " + default_principal; std::cerr << Util::getCurrentTime() << '\t' << "INFO:" << kinit_cmd << std::endl; @@ -221,7 +223,9 @@ std::pair fetch_gmsa_password_and_create_krb_ticket( perror( "kinit failed" ); OPENSSL_cleanse( password_found_result.second, password_found_result.first ); OPENSSL_free( password_found_result.second ); - cf_logger.logger( LOG_ERR, "ERROR: %s:%d kinit failed", __func__, __LINE__ ); + std::string log_message = "ERROR: " + std::string( __func__ ) + " : " + + std::to_string( __LINE__ ) + " kinit failed"; + cf_logger.logger( LOG_ERR, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << "ERROR: kinit failed" << std::endl; return std::make_pair( -1, std::string( "kinit failed" ) ); } @@ -497,7 +501,7 @@ std::string renew_gmsa_ticket( krb_ticket_info_t* krb_ticket, std::string domain std::string renewed_krb_ticket_path; std::pair gmsa_ticket_result; std::string krb_cc_name = krb_ticket->krb_file_path; - + std::string log_message; // gMSA kerberos ticket generation needs to have ldap over kerberos // if the ticket exists for the machine/user already reuse it for getting gMSA password else // retry the ticket creation again after generating user/machine kerberos ticket @@ -510,16 +514,16 @@ std::string renew_gmsa_ticket( krb_ticket_info_t* krb_ticket, std::string domain { if ( i == 0 ) { - cf_logger.logger( LOG_WARNING, - 
"WARNING: Cannot get gMSA krb ticket " - "because of expired user/machine ticket, " - "will be retried automatically, service_account_name = %s", - krb_ticket->service_account_name.c_str() ); + log_message = "WARNING: Cannot get gMSA krb ticket because of expired user/machine " + "ticket, will be retried automatically, service_account_name = " + + krb_ticket->service_account_name; + cf_logger.logger( LOG_WARNING, log_message.c_str() ); } else { - cf_logger.logger( LOG_ERR, "ERROR: Cannot get gMSA krb ticket using account %s", - krb_ticket->service_account_name.c_str() ); + log_message = "ERROR: Cannot get gMSA krb ticket using account " + + krb_ticket->service_account_name; + cf_logger.logger( LOG_ERR, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << "ERROR: Cannot get gMSA krb ticket using account" << std::endl; @@ -534,7 +538,9 @@ std::string renew_gmsa_ticket( krb_ticket_info_t* krb_ticket, std::string domain if ( status.first < 0 ) { - cf_logger.logger( LOG_ERR, "ERROR %d: Cannot get user krb ticket", status ); + log_message = + "ERROR " + std::to_string( status.first ) + ": Cannot get user krb ticket"; + cf_logger.logger( LOG_ERR, log_message.c_str() ); std::cerr << Util::getCurrentTime() << '\t' << "ERROR: Cannot get user krb ticket" << std::endl; } diff --git a/auth/kerberos/src/utf16_decode/Program.runtimeconfig.json b/auth/kerberos/src/utf16_decode/Program.runtimeconfig.json deleted file mode 100644 index 355167e4..00000000 --- a/auth/kerberos/src/utf16_decode/Program.runtimeconfig.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "runtimeOptions": { - "framework": { - "name": "Microsoft.NETCore.App", - "version": "6.0.0" - } - } -} diff --git a/auth/kerberos/src/utf16_decode/build-using-csc.sh b/auth/kerberos/src/utf16_decode/build-using-csc.sh deleted file mode 100755 index eaa71acd..00000000 --- a/auth/kerberos/src/utf16_decode/build-using-csc.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/sh - -# Thanks to 
https://github.com/dotnet/sdk/issues/8742#issuecomment-890559867 - -DOTNET_CLI_TELEMETRY_OPTOUT=1 -export DOTNET_CLI_TELEMETRY_OPTOUT - -sdkver=$(LC_ALL=C dotnet --version) -fwkver=$(LC_ALL=C dotnet --list-runtimes | \ - LC_ALL=C sed --posix -n '/^Microsoft.NETCore.App \([^ ]*\) .*$/{s//\1/p;q;}') - -dotnethome=/usr/lib/dotnet -if [ -d /usr/lib64/dotnet ]; then - dotnethome=/usr/lib64/dotnet -fi -echo "dotnethome=$dotnethome" - -dotnetlib=$dotnethome/shared/Microsoft.NETCore.App/$fwkver -if [ -d /usr/share/dotnet/packs/Microsoft.NETCore.App.Ref/$fwkver/ref/net8.0/ ]; then - dotnetlib=/usr/share/dotnet/packs/Microsoft.NETCore.App.Ref/$fwkver/ref/net8.0/ -elif [ -d /usr/share/dotnet/packs/Microsoft.NETCore.App.Ref/$fwkver/ref/net6.0/ ]; then - dotnetlib=/usr/share/dotnet/packs/Microsoft.NETCore.App.Ref/$fwkver/ref/net6.0/ -fi -echo "dotnetlib=$dotnetlib" - -dotnet_cscdll=$dotnethome/sdk/$sdkver/Roslyn/bincore/csc.dll -if [ -f /usr/share/dotnet/sdk/$sdkver/Roslyn/bincore/csc.dll ]; then - dotnet_cscdll=/usr/share/dotnet/sdk/$sdkver/Roslyn/bincore/csc.dll -fi -echo "dotnet_cscdll=$dotnet_cscdll" - -dotnet_csclib='-r:netstandard.dll -r:Microsoft.CSharp.dll -r:System.dll' -for x in "$dotnetlib"/System.*.dll; do - dotnet_csclib="$dotnet_csclib -r:${x##*/}" -done -echo "dotnet_csclib=$dotnet_csclib" -# add if needed -#dotnet_csclib="$dotnet_csclib -r:Microsoft.Win32.Primitives.dll" - -exec dotnet "$dotnet_cscdll" "-lib:$dotnetlib" $dotnet_csclib "$@" - -#!/bin/sh - -DOTNET_CLI_TELEMETRY_OPTOUT=1 -export DOTNET_CLI_TELEMETRY_OPTOUT - -sdkver=$(LC_ALL=C dotnet --version) -fwkver=$(LC_ALL=C dotnet --list-runtimes | \ - LC_ALL=C sed --posix -n '/^Microsoft.NETCore.App \([^ ]*\) .*$/{s//\1/p;q;}') - -exename=$1 -case $exename in -(*.exe|*.EXE) ;; -(*) - echo >&2 "E: $exename is not a .exe file" - exit 1 - ;; -esac - -jsonname=${exename%.*}.runtimeconfig.json -printf '%s"%s"%s\n' \ - '{"runtimeOptions":{"framework":{"name":"Microsoft.NETCore.App","version":' \ - "$fwkver" '}}}' 
>"$jsonname" diff --git a/auth/kerberos/src/utf16_decode/build-using-native-aot.sh b/auth/kerberos/src/utf16_decode/build-using-native-aot.sh new file mode 100755 index 00000000..17cf90f2 --- /dev/null +++ b/auth/kerberos/src/utf16_decode/build-using-native-aot.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +# Ensure .NET CLI doesn't send telemetry data +DOTNET_CLI_TELEMETRY_OPTOUT=1 +export DOTNET_CLI_TELEMETRY_OPTOUT + +# Get the .NET SDK version +sdkver=$(dotnet --version) + +project_file="utf16_decode.csproj" + +dotnet publish "$project_file" \ + -c Release \ + -r linux-x64 \ + --self-contained true \ + -p:PublishAot=true \ + -p:InvariantGlobalization=true + +echo "NativeAOT compilation complete. Check the publish directory for the output." \ No newline at end of file diff --git a/auth/kerberos/src/utf16_decode/utf16_decode.csproj b/auth/kerberos/src/utf16_decode/utf16_decode.csproj index 53c2ea92..f301226f 100644 --- a/auth/kerberos/src/utf16_decode/utf16_decode.csproj +++ b/auth/kerberos/src/utf16_decode/utf16_decode.csproj @@ -2,10 +2,10 @@ Exe - net6.0 + net8.0 enable enable - true + true true linux-x64 true diff --git a/cdk/cdk-domainless-mode/Dockerfile b/cdk/cdk-domainless-mode/Dockerfile new file mode 100644 index 00000000..45684d8e --- /dev/null +++ b/cdk/cdk-domainless-mode/Dockerfile @@ -0,0 +1,6 @@ +FROM mcr.microsoft.com/mssql-tools +RUN cat /etc/os-release +RUN apt-get update -y +RUN apt-get install krb5-user unzip inetutils-ping dnsutils -y +ENV KRB5CCNAME=/var/credentials-fetcher/krbdir/krb5cc +CMD ["sleep", "infinity"] \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/README.md b/cdk/cdk-domainless-mode/README.md index 9f0e3f7b..b54488f9 100644 --- a/cdk/cdk-domainless-mode/README.md +++ b/cdk/cdk-domainless-mode/README.md @@ -11,16 +11,19 @@ Runs a couple of tasks in the ECS-optimized Linux instance using gMSA in domainl Disclaimer This CDK and scripts are only for test, please modify as needed. 
+Create the following environment variables: +1. AWS_REGION +2. S3_PREFIX +3. KEY_PAIR_NAME +4. PREFIX_LIST + Pre-requisites Please take a look at data.json for default values. -1) Create secret in Secrets Manager as per https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html#linux-gmsa-setup with the following values: - This is the same secret in data.json. - ``` - Secret key Secret value - username standarduser01 - password p@ssw0rd - domainName activedirectory1.com - ``` +If you're testing a new RPM, upload it in the S3 bucket. +Ensure you have docker running in the background. + +1. Update data.json, and make sure there are no values with "xxxxxxxx" + 2) 'default' AWS profile with administrator access is needed, a separate/burner AWS account would suffice. Steps to run tasks in ECS with Credentials-fetcher. @@ -47,6 +50,7 @@ Steps to run tasks in ECS with Credentials-fetcher. Once the virtualenv is activated, you can install the required dependencies. ``` + $ cd cdk/cdk-domainless-mode $ pip install -r requirements.txt ``` @@ -66,16 +70,24 @@ Steps to run tasks in ECS with Credentials-fetcher. [10:29:46] Command line arguments: { _: [ 'bootstrap' ], ``` - -7) Run copy_credspecs_and_create_task_defs.py to create and copy credspecs to S3 bucket and also to register ECS task definitions. 
+ +6) Run End-To-End SQL test with Credentials Fetcher ECS Domainless Setup + ``` + (.venv) tests % python3 run_e2e_test.py + ``` +7) Done: If everything worked as expected, you should see an output like this in the console: ``` - (.venv) cdk % python3 copy_credspecs_and_create_task_defs.py + EmpID EmpName Designation DepartmentJoiningDate + ----------- -------------------------------------------------- -------------------------------------------------- ------------------------------------------------------------------------- + 1 CHIN YEN LAB ASSISTANT LAB2022-03-05 03:57:09.967 + 2 MIKE PEARL SENIOR ACCOUNTANT ACCOUNTS2022-03-05 03:57:09.967 + 3 GREEN FIELD ACCOUNTANT ACCOUNTS2022-03-05 03:57:09.967 + 4 DEWANE PAUL PROGRAMMER IT2022-03-05 03:57:09.967 + 5 MATTS SR. PROGRAMMER IT2022-03-05 03:57:09.967 + 6 PLANK OTO ACCOUNTANT ACCOUNTS2022-03-05 03:57:09.967 + + (6 rows affected) ``` -8) After CloudFormation stack is complete, launch tasks using run_tasks.py. (You can install a test RPM into the ECS intance here, if you like) - ``` - (.venv) samiull@6cb1339dd38d cdk % python3 run_tasks.py - ``` -9) Done: You can see the tasks in EC2 Console diff --git a/cdk/cdk-domainless-mode/app.py b/cdk/cdk-domainless-mode/app.py index 8ca4ba3e..8da6cafc 100644 --- a/cdk/cdk-domainless-mode/app.py +++ b/cdk/cdk-domainless-mode/app.py @@ -15,24 +15,29 @@ # Load the JSON data data = json.load(file) -#print(data) +def get_value(key): + return os.environ.get(key, data.get(key.lower())) tag = cdk.Tag("Name", "Test Credentials-fetcher in Domainless mode") -aws_region = data["aws_region"] -prefix_list = data["prefix_list"] +aws_region = get_value("AWS_REGION") +prefix_list = get_value("PREFIX_LIST") domain_admin_password = data["domain_admin_password"] directory_name = data["directory_name"] windows_instance_tag = data["windows_instance_tag"] linux_instance_tag = data["linux_instance_tag"] -key_name = data["key_pair_name"] +key_name = get_value("KEY_PAIR_NAME")
number_of_gmsa_accounts = data["number_of_gmsa_accounts"] -empty_s3_bucket = data["s3_bucket"] +s3_bucket = get_value("S3_PREFIX") + data["s3_bucket_suffix"] app_name = data["stack_name"] username = data["username"] password = data["password"] secret_name = data["secret_name"] task_definition_template_name = data["task_definition_template_name"] cluster_name = data["cluster_name"] +docker_image_tag = data["docker_image_tag"] +dockerfile_path = data["dockerfile_path"] +ecr_repo_name = data["ecr_repo_name"] +rpm_file = data["rpm_file"] app = cdk.App() @@ -52,7 +57,7 @@ domain_name = directory_name, key_name = key_name, number_of_gmsa_accounts = number_of_gmsa_accounts, - s3_bucket_name = empty_s3_bucket + s3_bucket_name = s3_bucket ) windows_instance.node.add_dependency(cfn_microsoft_AD) @@ -64,9 +69,11 @@ key_pair=cdk_stack.key_pair, number_of_gmsa_accounts=number_of_gmsa_accounts, vpc = cdk_stack.vpc, - security_group=cdk_stack.security_group) + security_group=cdk_stack.security_group, rpm_file=rpm_file, s3_bucket=s3_bucket) ecs_cluster.node.add_dependency(windows_instance) task_definition = cdk_stack.create_task_definition(task_definition_template_name=task_definition_template_name) +docker_image_uri = cdk_stack.build_push_dockerfile_to_ecr(dockerfile_path, ecr_repo_name, aws_region, docker_image_tag) + app.synth() diff --git a/cdk/cdk-domainless-mode/cdk/cdk_stack.py b/cdk/cdk-domainless-mode/cdk/cdk_stack.py index ff0724bb..d6cc646c 100644 --- a/cdk/cdk-domainless-mode/cdk/cdk_stack.py +++ b/cdk/cdk-domainless-mode/cdk/cdk_stack.py @@ -19,9 +19,10 @@ from aws_cdk import aws_route53resolver from aws_cdk import Duration as duration import uuid -import json import boto3 -import json +import base64 +import docker +import os class CdkStack(Stack): @@ -83,6 +83,13 @@ def init_vpc(self, prefix_list: str, key_pair_name: str, stack_name: str): self.security_group.add_ingress_rule (self.prefix_list, ec2.Port.all_traffic()) + + 
peer=self.security_group, + connection=ec2.Port.all_traffic(), + description="Allow all traffic from self" + ) + # Import existing keypair using keyname self.key_pair = ec2.KeyPair.from_key_pair_name(self, "KeyPair", key_pair_name) @@ -148,6 +155,7 @@ def init_DirectoryService(self, directory_name:str, domain_admin_password: str): enable_sso=False ) + self.cfn_microsoft_AD.node.add_dependency(self.vpc) return self.cfn_microsoft_AD @@ -158,13 +166,8 @@ def launch_windows_instance(self, instance_tag: str, password: str, number_of_gmsa_accounts: int, s3_bucket_name: str): - user_data_script = self.setup_windows_userdata(password=password, - domain_name=domain_name, - number_of_gmsa_accounts=number_of_gmsa_accounts, - s3_bucket_name=s3_bucket_name) # Add user_data_script to user_data user_data = ec2.UserData.for_windows(persist=True) - user_data.add_commands(user_data_script) user_data = cdk.Fn.base64(user_data.render()) # Create an instance role @@ -207,7 +210,6 @@ def launch_windows_instance(self, instance_tag: str, password: str, "MyCfnInstance", instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.XLARGE).to_string(), image_id=ec2.WindowsImage(version=ec2.WindowsVersion.WINDOWS_SERVER_2022_ENGLISH_FULL_SQL_2022_ENTERPRISE).get_image(self).image_id, - user_data=user_data, security_group_ids=[self.security_group.security_group_id], subnet_id=self.subnet_1.subnet_id, tags=[cdk.CfnTag(key="Name", value=instance_tag)], @@ -245,10 +247,10 @@ def create_ecs_cluster(self, cluster_name: str, key_pair: ec2.KeyPair, number_of_gmsa_accounts: int, vpc : str, - security_group : str): + security_group : str, rpm_file:str, s3_bucket:str): machine_image = ecs.EcsOptimizedImage.amazon_linux2023(hardware_type=ecs.AmiHardwareType.STANDARD) - instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.XLARGE) + instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.XLARGE) role = iam.Role(self, "Role", 
role_name="ecs-instance-role", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com")) role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2ContainerServiceforEC2Role")) @@ -259,7 +261,7 @@ def create_ecs_cluster(self, cluster_name: str, # add role for Directory Service role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AWSDirectoryServiceFullAccess")) - user_data_script = self.setup_linux_userdata(instance_tag, password, domain_name, key_pair.key_pair_name, number_of_gmsa_accounts) + user_data_script = self.setup_linux_userdata(instance_tag, password, domain_name, key_pair.key_pair_name, number_of_gmsa_accounts, rpm_file, s3_bucket) user_data = ec2.UserData.for_linux() user_data.add_commands(user_data_script) #user_data = cdk.Fn.base64(user_data.render()) @@ -298,14 +300,13 @@ def create_ecs_cluster(self, cluster_name: str, def setup_linux_userdata (self, instance_tag: str, password: str, domain_name: str, key_name: str, - number_of_gmsa_accounts: int): + number_of_gmsa_accounts: int, rpm_file: str, s3_bucket: str): #In instance, 'cat /var/lib/cloud/instance/user-data.txt' # get random uuid string random_uuid_str = str(uuid.uuid4()) ecs_cluster_name="ecs-load-test-" + random_uuid_str user_data_script = ''' echo "ECS_GMSA_SUPPORTED=true" >> /etc/ecs/ecs.config - dnf install -y dotnet dnf install -y realmd dnf install -y oddjob dnf install -y oddjob-mkhomedir @@ -313,12 +314,28 @@ def setup_linux_userdata (self, instance_tag: str, password: str, dnf install -y adcli dnf install -y krb5-workstation dnf install -y samba-common-tools - dnf install -y credentials-fetcher + if aws s3 ls "s3://BUCKET_NAME/RPM_FILE" &> /dev/null; then + echo "RPM file found in S3 bucket. Transferring to EC2 instance..." >> /tmp/userdata.log + aws s3 cp s3://BUCKET_NAME/RPM_FILE . + dnf install -y ./RPM_FILE + if [ $? -ne 0 ]; then + echo "RPM file installation failed. Installing credentials-fetcher..." 
>> /tmp/userdata.log + dnf install -y credentials-fetcher + else + echo "RPM file installation successful." >> /tmp/userdata.log + fi + else + echo "RPM file not found in S3 bucket. Installing credentials-fetcher..." >> /tmp/userdata.log + dnf install -y credentials-fetcher + fi systemctl enable credentials-fetcher systemctl start credentials-fetcher systemctl enable --now --no-block ecs.service user_data_script += "echo ECS_CLUSTER=" + ecs_cluster_name + " >> /etc/ecs/ecs.config" ''' + user_data_script = user_data_script.replace('BUCKET_NAME', s3_bucket) + user_data_script = user_data_script.replace('RPM_FILE', rpm_file) + return user_data_script # Save json values in secrets manager @@ -380,7 +397,7 @@ def create_task_definition(self, task_definition_template_name): container_definition = task_definition.add_container( "MyContainer", image=ecs.ContainerImage.from_registry("nginx:latest"), - memory_reservation_mib=256, + memory_reservation_mib=128, start_timeout=duration.seconds(120), stop_timeout=duration.seconds(60) ) @@ -392,3 +409,40 @@ def create_task_definition(self, task_definition_template_name): task_definition.node.add_dependency(self.cfn_microsoft_AD) return task_definition + + def build_push_dockerfile_to_ecr(self, dockerfile_path, repository_name, region, tag='latest'): + ecr_client = boto3.client('ecr', region_name=region) + + # Create ECR repository if it doesn't exist + try: + ecr_client.create_repository(repositoryName=repository_name) + except ecr_client.exceptions.RepositoryAlreadyExistsException: + pass + + # Get the repository URI + response = ecr_client.describe_repositories(repositoryNames=[repository_name]) + repository_uri = response['repositories'][0]['repositoryUri'] + + # Get ECR login token + token = ecr_client.get_authorization_token() + username, password = base64.b64decode(token['authorizationData'][0]['authorizationToken']).decode().split(':') + registry = token['authorizationData'][0]['proxyEndpoint'] + + # Build Docker image + 
docker_client = docker.from_env() + image, build_logs = docker_client.images.build( + path=os.path.dirname(dockerfile_path), + dockerfile=os.path.basename(dockerfile_path), + tag=f"{repository_uri}:{tag}" + ) + + # Login to ECR + docker_client.login(username=username, password=password, registry=registry) + + # Push image to ECR + push_logs = docker_client.images.push(repository_uri, tag=tag) + + # Construct the full image URI + image_uri = f"{repository_uri}:{tag}" + + return image_uri \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/copy_credspecs_and_create_task_defs.py b/cdk/cdk-domainless-mode/copy_credspecs_and_create_task_defs.py deleted file mode 100644 index 576ae2de..00000000 --- a/cdk/cdk-domainless-mode/copy_credspecs_and_create_task_defs.py +++ /dev/null @@ -1,171 +0,0 @@ -import boto3 -import json - -# Open the input file -with open('data.json', 'r') as file: - # Load the JSON data - data = json.load(file) - -directory_name = data["directory_name"] -netbios_name = data["netbios_name"] -number_of_gmsa_accounts = data["number_of_gmsa_accounts"] -s3_bucket = data["s3_bucket"] -task_definition_template_name = data["task_definition_template_name"] -stack_name = data["stack_name"] - -credspec_template = """ -{ - "CmsPlugins": ["ActiveDirectory"], - "DomainJoinConfig": { - "Sid": "S-1-5-21-2421564706-1737585382-3854682907", - "MachineAccountName": "GMSA_NAME", - "Guid": "6a91814c-e151-4fb0-96f0-f517566fc883", - "DnsTreeName": "DOMAINNAME", - "DnsName": "DOMAINNAME", - "NetBiosName": "NETBIOS_NAME" - }, - "ActiveDirectoryConfig": { - "GroupManagedServiceAccounts": [ - { - "Name": "GMSA_NAME", - "Scope": "DOMAINNAME" - }, - { - "Name": "GMSA_NAME", - "Scope": "NETBIOS_NAME" - } - ], - "HostAccountConfig": { - "PortableCcgVersion": "1", - "PluginGUID": "{859E1386-BDB4-49E8-85C7-3070B13920E1}", - "PluginInput": { - "CredentialArn": "GMSA_SECRET_ARN" - } - } - } -} -""" - -credspec_template = credspec_template.replace("DOMAINNAME", directory_name) 
-credspec_template = credspec_template.replace("NETBIOS_NAME", netbios_name) - -secrets_manager_client = boto3.client('secretsmanager') -secret_id = "aws/directoryservice/" + netbios_name + "/gmsa" -# get secrets manager arn from secret name -print("Secret id = " + secret_id) -gmsa_secret_arn = secrets_manager_client.get_secret_value(SecretId=secret_id)['ARN'] -credspec_template = credspec_template.replace("GMSA_SECRET_ARN", gmsa_secret_arn) - -for i in range(1, number_of_gmsa_accounts + 1): - credspec_template.replace("GMSA_NAME", f"GMSA{i}") - -aws_profile_name = data["aws_profile_name"] - -boto3.setup_default_session(profile_name=aws_profile_name) - -# list iam roles with a given name -list_roles = boto3.client('iam').list_roles(MaxItems=1000) -for role in list_roles['Roles']: - print(role['RoleName']) -for role in list_roles['Roles']: - role_name = role['RoleName'] - if 'CredentialsFetcher-ECSTaskExecutionRolegMSA' == role_name: - ecs_task_execution_role_arn = role['Arn'] - break - -# list ECS task definitions -ecs_client = boto3.client('ecs') - -# task_definition_prefix = 'ecs-task-definition' -# Call the list_task_definitions method with a prefix filter - -task_definition_arn = "" -task_definition = "" -response = ecs_client.list_task_definitions() -# Check if any task definitions match the prefix -if 'taskDefinitionArns' in response: - task_definitions = response['taskDefinitionArns'] - if task_definitions == []: - print("No task definitions found") - exit() - for arn in task_definitions: - if task_definition_template_name in arn: - matching_task_definitions = arn - # Get task definition details - task_definition = ecs_client.describe_task_definition(taskDefinition=arn) - task_definition_arn = arn - break -else: - print(f"No task definitions found matching '{response}'") - exit() - -# Get ecs cluster -ecs_clusters = ecs_client.list_clusters() -ecs_cluster_arn = "" -ecs_cluster_instance = "" -ecs_cluster_name = "Credentials-fetcher-ecs-load-test" -for 
cluster_arn in ecs_clusters['clusterArns']: - cluster_name = cluster_arn.split('/')[1] - if cluster_name == ecs_cluster_name: - ecs_cluster_arn = cluster_arn - # Get instance-id attached running ecs cluster - ecs_cluster_instance_arn = ecs_client.list_container_instances(cluster=ecs_cluster_arn)['containerInstanceArns'][0] - break - -task_definition_orig = task_definition -print(task_definition) -for i in range(1, number_of_gmsa_accounts + 1): - task_definition = task_definition_orig - credspec_template = credspec_template.replace("GMSA_NAME", f"WebApp0{i}") - credspec = json.loads(credspec_template) - credspec_str = json.dumps(credspec) - # copy credspec to S3 folder - s3_client = boto3.client('s3') - bucket_location = "" - bucket_arn = "" - s3_key = "" - try: - # put credspec_str into s3 bucket - s3_key = f"WebApp0{i}_credspec.json" - print("Putting object") - s3_client.put_object(Body=credspec_str, Bucket=s3_bucket, Key=f'WebApp0{i}_credspec.json') - bucket_location = s3_client.get_bucket_location(Bucket=s3_bucket) - bucket_arn = f"arn:aws:s3:::{s3_bucket}" - except Exception as e: - print(e) - - #print(task_definition) - task_definition = task_definition["taskDefinition"] - task_definition["compatibilities"].append("FARGATE") - - container_defs = task_definition['containerDefinitions'] - pretty_json = json.dumps(container_defs, indent=4) - print(pretty_json) - for container_def in container_defs: - credspec = container_def['credentialSpecs'] - # Remove entry with key 'credentialspecdomainless' - credspec_without_key = [] - for d in credspec: - if 'credentialspecdomainless' not in d: - credspec_without_key.append(d) - credspec = credspec_without_key - print(credspec) - credspec.append("credentialspecdomainless:" + bucket_arn + "/" + s3_key) - container_def['credentialSpecs'] = credspec - attributes = task_definition['requiresAttributes'] - attribute = {} - attribute["name"] = "ecs.capability.gmsa-domainless" - attribute["targetId"] = ecs_cluster_arn - 
attributes.append(attribute) - family = task_definition['family'] + "-" + str(i) - ecs_client.register_task_definition(family=family, - taskRoleArn=ecs_task_execution_role_arn, - executionRoleArn=ecs_task_execution_role_arn, - networkMode=task_definition['networkMode'], - containerDefinitions=container_defs, - requiresCompatibilities=["EC2", "FARGATE"], - runtimePlatform={'cpuArchitecture': 'X86_64', 'operatingSystemFamily' : 'LINUX'}, - cpu=task_definition['cpu'], - memory=task_definition['memory']) - #print(ecs_cluster_arn) - diff --git a/cdk/cdk-domainless-mode/data.json b/cdk/cdk-domainless-mode/data.json index 6cee0412..68af567c 100644 --- a/cdk/cdk-domainless-mode/data.json +++ b/cdk/cdk-domainless-mode/data.json @@ -1,20 +1,27 @@ { "stack_name": "Credentials-fetcher-AD-Stack", - "aws_region": "us-west-1", - "prefix_list": "pl-XXXXXXXX", + "aws_region": "us-west-2", + "prefix_list": "xxxxxxxxx", + "domain_admin_username": "admin", "domain_admin_password": "Qn:51eJsORJNL@~{HY@?", - "key_pair_name": "samiull-sfo", - "directory_name": "ActiveDirectory1.com", - "netbios_name" : "ActiveDirectory1", + "key_pair_name": "xxxxxxxxxx", + "directory_name": "contoso.com", + "netbios_name" : "contoso", "windows_instance_tag": "ActiveDirectoryManagementInstance", "linux_instance_tag": "CredentialsFetcherLinuxInstance", - "s3_bucket": "credentials-fetcher-pre-created-bucket", + "s3_prefix": "xxxxxxxx", + "s3_bucket_suffix": "-credentials-fetcher-pre-created-bucket", "number_of_gmsa_accounts": 10, "aws_profile_name": "default", "username": "StandardUser01", "password": "p@ssw0rd", - "secret_name": "aws/directoryservice/ActiveDirectory1/gmsa", + "secret_name": "aws/directoryservice/contoso/gmsa", "task_definition_template_name": "CredentialsFetcherTaskDefinitiontemplate", "cluster_name": "Credentials-fetcher-ecs-load-test", - "vpc_name": "Credentials-fetcher-AD-Stack-vpc" + "vpc_name": "Credentials-fetcher-AD-Stack-vpc", + "ecr_repo_name": "my-ecr-repo", + "docker_image_tag": 
"latest", + "dockerfile_path": "./Dockerfile", + "rpm_file": "credentials-fetcher-1.3.65-0.amzn2023.x86_64.rpm", + "max_tasks_per_instance": 3 } diff --git a/cdk/cdk-domainless-mode/gmsa.ps1 b/cdk/cdk-domainless-mode/gmsa.ps1 deleted file mode 100644 index c60ea867..00000000 --- a/cdk/cdk-domainless-mode/gmsa.ps1 +++ /dev/null @@ -1,114 +0,0 @@ - -# This script does the following: -# 1) Install/Update SSM agent - without this the domain-join can fail -# 2) Create a new OU -# 3) Create a new security group -# 4) Create a new standard user account, this account's username and password needs to be stored in a secret store like AWS secrets manager. -# 5) Add members to the security group that is allowed to retrieve gMSA password -# 6) Create gMSA accounts with PrincipalsAllowedToRetrievePassword set to the security group created in 4) - -# 1) Install SSM agent -Write-Output "Updating SSM agent..." -[System.Net.ServicePointManager]::SecurityProtocol = 'TLS12' -$progressPreference = 'silentlyContinue' -Invoke-WebRequest https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/windows_amd64/AmazonSSMAgentSetup.exe -OutFile $env:USERPROFILE\Desktop\SSMAgent_latest.exe -Start-Process -FilePath $env:USERPROFILE\Desktop\SSMAgent_latest.exe -ArgumentList "/S" - -# To install the AD module on Windows Server, run Install-WindowsFeature RSAT-AD-PowerShell -# To install the AD module on Windows 10 version 1809 or later, run Add-WindowsCapability -Online -Name 'Rsat.ActiveDirectory.DS-LDS.Tools~~~~0.0.1.0' -# To install the AD module on older versions of Windows 10, see https://aka.ms/rsat -Write-Output "Installing Active Directory management tools..." 
-Install-WindowsFeature -Name "RSAT-AD-Tools" -IncludeAllSubFeature -Install-WindowsFeature RSAT-AD-PowerShell -Install-Module CredentialSpec -Install-Module -Name SqlServer -AllowClobber -Force - -$username = "admin@DOMAINNAME" -$password = "INPUTPASSWORD" | ConvertTo-SecureString -AsPlainText -Force -$credential = New-Object System.Management.Automation.PSCredential($username, $password) -$groupAllowedToRetrievePassword = "WebAppAccounts_OU" -# This is the basedn path that needs to be in secrets manager as "distinguishedName" : "OU=MYOU,OU=Users,OU=ActiveDirectory,DC=NETBIOS_NAME,DC=com" -$path = "OU=MYOU,OU=Users,OU=ActiveDirectory,DC=NETBIOS_NAME,DC=com" - - -# 2) Create OU -New-ADOrganizationalUnit -Name "MYOU" -Path "OU=Users,OU=ActiveDirectory,DC=NETBIOS_NAME,DC=com" -Credential $credential - -# 3) Create the security group -try { - New-ADGroup -Name "WebApp Authorized Accounts in OU" -SamAccountName $groupAllowedToRetrievePassword -Credential $credential -GroupScope DomainLocal -Server DOMAINNAME -} catch { - Write-Output "Security Group created" -} - -# 4) Create a new standard user account, this account's username and password needs to be stored in a secret store like AWS secrets manager. 
-try { - New-ADUser -Name "StandardUser01" -AccountPassword (ConvertTo-SecureString -AsPlainText "p@ssw0rd" -Force) -Enabled 1 -Credential $credential -Path $path -Server DOMAINNAME -} catch { - Write-Output "Created StandardUser01" -} - -# 5) Add members to the security group that is allowed to retrieve gMSA password -try { - Add-ADGroupMember -Identity $groupAllowedToRetrievePassword -Members "StandardUser01" -Credential $credential -Server DOMAINNAME - Add-ADGroupMember -Identity $groupAllowedToRetrievePassword -Members "admin" -Credential $credential -Server DOMAINNAME -} catch { - Write-Output "Created AD Group $groupAllowedToRetrievePassword" -} - -# 6) Create gMSA accounts with PrincipalsAllowedToRetrievePassword set to the security group created in 4) -$string_err = "" -for (($i = 1); $i -le NUMBER_OF_GMSA_ACCOUNTS; $i++) -{ - # Create the gMSA account - $gmsa_account_name = "WebApp0" + $i - $gmsa_account_with_domain = $gmsa_account_name + ".DOMAINNAME" - $gmsa_account_with_host = "host/" + $gmsa_account_name - $gmsa_account_with_host_and_domain = $gmsa_account_with_host + ".DOMAINNAME" - - try { - #New-ADServiceAccount -Name serviceuser1 -Path "OU=MYOU1,OU=Users,OU=ActiveDirectory,DC=ActiveDirectory1,DC=com" -Credential $credential -DNSHostname "ActiveDirectory1.com" - New-ADServiceAccount -Name $gmsa_account_name -DnsHostName $gmsa_account_with_domain -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword -Path $path -Credential $credential -Server DOMAINNAME - Write-Output "New-ADServiceAccount -Name $gmsa_account_name -DnsHostName $gmsa_account_with_domain -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword -Path $path -Credential $credential -Server DOMAINNAME" - } catch { - $string_err = $_ | Out-String - Write-Output "Error while gMSA account 
creation and copy credspec to S3 bucket: " + $string_err - } -} - -# Set the SQL Server instance name -$sqlInstance = $env:computername - -New-NetFirewallRule -DisplayName "SQLServer default instance" -Direction Inbound -LocalPort 1433 -Protocol TCP -Action Allow -New-NetFirewallRule -DisplayName "SQLServer Browser service" -Direction Inbound -LocalPort 1434 -Protocol UDP -Action Allow -netsh advfirewall firewall add rule name = SQLPort dir = in protocol = tcp action = allow localport = 1433 remoteip = localsubnet profile = DOMAIN - -# Create a connection string -$connectionString0 = "Server=$sqlInstance;Integrated Security=True;" -$connectionString1 = "Server=$sqlInstance;Database=EmployeesDB;Integrated Security=True;" - -$createDatabaseQuery = "CREATE DATABASE EmployeesDB" - -$query = @" -CREATE TABLE dbo.EmployeesTable ( - EmpID INT IDENTITY(1,1) PRIMARY KEY, - EmpName VARCHAR(50) NOT NULL, - Designation VARCHAR(50) NOT NULL, - Department VARCHAR(50) NOT NULL, - JoiningDate DATETIME NOT NULL -); - -INSERT INTO EmployeesDB.dbo.EmployeesTable (EmpName, Designation, Department, JoiningDate) -VALUES - ('CHIN YEN', 'LAB ASSISTANT', 'LAB', '2022-03-05 03:57:09.967'), - ('MIKE PEARL', 'SENIOR ACCOUNTANT', 'ACCOUNTS', '2022-03-05 03:57:09.967'), - ('GREEN FIELD', 'ACCOUNTANT', 'ACCOUNTS', '2022-03-05 03:57:09.967'), - ('DEWANE PAUL', 'PROGRAMMER', 'IT', '2022-03-05 03:57:09.967'), - ('MATTS', 'SR. 
PROGRAMMER', 'IT', '2022-03-05 03:57:09.967'), - ('PLANK OTO', 'ACCOUNTANT', 'ACCOUNTS', '2022-03-05 03:57:09.967'); -alter authorization on database::[EmployeesDB] to [WebApp01$] -"@ - -Invoke-Sqlcmd -ConnectionString $connectionString0 -Query $createDatabaseQuery -QueryTimeout 60 -Invoke-Sqlcmd -ConnectionString $connectionString1 -Query $query - diff --git a/cdk/cdk-domainless-mode/requirements.txt b/cdk/cdk-domainless-mode/requirements.txt index 108152a7..2a90f1b0 100644 --- a/cdk/cdk-domainless-mode/requirements.txt +++ b/cdk/cdk-domainless-mode/requirements.txt @@ -1,3 +1,4 @@ aws-cdk-lib==2.156.0 constructs>=10.0.0,<11.0.0 boto3>=1.35.15 +docker diff --git a/cdk/cdk-domainless-mode/run_tasks.py b/cdk/cdk-domainless-mode/run_tasks.py deleted file mode 100644 index ad4d74f9..00000000 --- a/cdk/cdk-domainless-mode/run_tasks.py +++ /dev/null @@ -1,82 +0,0 @@ -import boto3 -import json - -# Open the input file -with open('data.json', 'r') as file: - # Load the JSON data - data = json.load(file) - -directory_name = data["directory_name"] -netbios_name = data["netbios_name"] -number_of_gmsa_accounts = data["number_of_gmsa_accounts"] -stack_name = data["stack_name"] -cluster_name = data["cluster_name"] -vpc_name = data["vpc_name"] -task_definition_template_name = data["task_definition_template_name"] - -ecs_client = boto3.client('ecs') - -# Find VPC of stack_name -ec2_client = boto3.client('ec2') -response = ec2_client.describe_vpcs( - Filters=[ - { - 'Name': 'tag:Name', - 'Values': [vpc_name] - } - ] -) -vpc_id = response['Vpcs'][0]['VpcId'] - -ec2_client = boto3.client('ec2') - -# list of subnets from vpc_id -# Get a list of subnets in the VPC -response = ec2_client.describe_subnets( - Filters=[ - { - 'Name': 'vpc-id', - 'Values': [vpc_id] - } - ] -) -subnet_ids = [subnet['SubnetId'] for subnet in response['Subnets']] - -# list of security groups from vpc -# Get a list of security groups in the VPC -response = ec2_client.describe_security_groups( - Filters=[ - { - 
'Name': 'vpc-id', - 'Values': [vpc_id] - } - ] -) -security_groups = [] -security_group_id = "" -for security_group in response['SecurityGroups']: - security_group_id = security_group['GroupId'] - security_group_name = security_group['GroupName'] - security_groups.append((security_group_id, security_group_name)) - -# list all task definitions -task_definitions = ecs_client.list_task_definitions() - -for task_definition in task_definitions['taskDefinitionArns']: - # If task definition matches CredentialsFetcherTaskDefinition - if task_definition_template_name in task_definition: - print(task_definition) - # Run a task with a task definition - task = ecs_client.run_task( - cluster=cluster_name, - taskDefinition=task_definition, - count=1, - launchType='EC2', - networkConfiguration={ - 'awsvpcConfiguration': { - 'subnets': subnet_ids, - 'securityGroups': [security_group_id], - } - } - ) - print("Started task " + str(task)) diff --git a/cdk/cdk-domainless-mode/start_stack.sh b/cdk/cdk-domainless-mode/start_stack.sh index dbbe688d..76fcdd4e 100755 --- a/cdk/cdk-domainless-mode/start_stack.sh +++ b/cdk/cdk-domainless-mode/start_stack.sh @@ -1,4 +1,10 @@ #!/bin/sh +# Install Docker if not present +if ! command -v docker &> /dev/null; then + echo "Please install and setup docker daemon and ensure it's running." 
+ exit +fi + echo "Please edit the file to add your AWS account number below" cdk bootstrap aws://XXXXXXXXXXXX/us-west-1 --trust=XXXXXXXXXXXX --cloudformation-execution-policies=arn:aws:iam::aws:policy/AdministratorAccess --verbose && cdk synth && cdk deploy diff --git a/cdk/cdk-domainless-mode/test-scripts/README.md b/cdk/cdk-domainless-mode/test-scripts/README.md new file mode 100644 index 00000000..359d8d81 --- /dev/null +++ b/cdk/cdk-domainless-mode/test-scripts/README.md @@ -0,0 +1,30 @@ +### Test Scripts + +#### Pre Requisites +- Ensure cdk stack is deployed to your personal account +- Create a new AL2023/Ubuntu instance in the ADStack VPC +- Install credentials-fetcher dependencies using dnf +```aiignore +dnf install -y realmd +dnf install -y oddjob +dnf install -y oddjob-mkhomedir +dnf install -y sssd +dnf install -y adcli +dnf install -y krb5-workstation +dnf install -y samba-common-tools +``` +- Install the latest credentials-fetcher rpm in this instance +- Run credentials-fetcher rpm as a systemd process +```aiignore +systemctl start credentials-fetcher +systemctl status credentials-fetcher +``` +- Clone credentials-fetcher repo and create a python proto file +```aiignore +git clone -b dev https://github.com/aws/credentials-fetcher.git +cd credentials-fetcher/cdk/cdk-domainless-mode/test-scripts +python3 -m venv .venv +source .venv/bin/activate +pip install grpcio-tools +python3 -m grpc_tools.protoc -I/home/ec2-user/credentials-fetcher/protos --python_out=. --grpc_python_out=. 
credentialsfetcher.proto +``` \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/test-scripts/add_delete_kerberos_leases.py b/cdk/cdk-domainless-mode/test-scripts/add_delete_kerberos_leases.py new file mode 100644 index 00000000..9c8c6c89 --- /dev/null +++ b/cdk/cdk-domainless-mode/test-scripts/add_delete_kerberos_leases.py @@ -0,0 +1,91 @@ +import grpc +import credentialsfetcher_pb2 +import credentialsfetcher_pb2_grpc +import os +import json +import time + +''' +Use this script to create and delete N kerberos leases in a recurring loop +(currently set to 100 times). This script is run to test that create/delete +functionality has no leaks or unexpected failures when run over a long +period of time. This script is run on a linux instance in stand-alone mode. +''' + +with open('../data.json', 'r') as file: + # Load the JSON data + data = json.load(file) + +def run(): + with grpc.insecure_channel('unix:///var/credentials-fetcher/socket/credentials_fetcher.sock') as channel: + stub = credentialsfetcher_pb2_grpc.CredentialsFetcherServiceStub(channel) + number_of_gmsa_accounts = data["number_of_gmsa_accounts"] + directory_name = data["directory_name"] + netbios_name = data["netbios_name"] + username = data["username"] + password = data["password"] + + for iter in range(100): # Repeat the process 100 times + lease_ids = [] + + # Create cred-specs for users ending with multiples of 5 + for i in range(2, number_of_gmsa_accounts, 2): + credspec_contents = f"""{{ + "CmsPlugins": ["ActiveDirectory"], + "DomainJoinConfig": {{ + "Sid": "S-1-5-21-2725122404-4129967127-2630707939", + "MachineAccountName": "WebApp0{i}", + "Guid": "e96e0e09-9305-462f-9e44-8a8179722897", + "DnsTreeName": "{directory_name}", + "DnsName": "{directory_name}", + "NetBiosName": "{netbios_name}" + }}, + "ActiveDirectoryConfig": {{ + "GroupManagedServiceAccounts": [ + {{"Name": "WebApp0{i}", "Scope": "{directory_name}"}}, + {{"Name": "WebApp0{i}", "Scope": "{netbios_name}"}} + ], + 
"HostAccountConfig": {{ + "PortableCcgVersion": "1", + "PluginGUID": "{{GDMA0342-266A-4D1P-831J-20990E82944F}}", + "PluginInput": {{ + "CredentialArn": "aws/directoryservice/contoso/gmsa" + }} + }} + }} + }}""" + + contents = [credspec_contents] + response = stub.AddNonDomainJoinedKerberosLease( + credentialsfetcher_pb2.CreateNonDomainJoinedKerberosLeaseRequest( + credspec_contents=contents, + username=username, + password=password, + domain=directory_name + ) + ) + print(f"Created lease for WebApp0{i}: {response.lease_id}") + lease_path = (f"/var/credentials-fetcher/krbdir/" + f"{response.lease_id}/WebApp0{i}/krb5cc") + assert os.path.exists(lease_path) + lease_ids.append(response.lease_id) + + # Small delay to allow for processing + time.sleep(1) + + # Delete the created cred-specs + for lease_id in lease_ids: + delete_response = stub.DeleteKerberosLease( + credentialsfetcher_pb2.DeleteKerberosLeaseRequest( + lease_id=lease_id + ) + ) + print(f"Deleted lease: {delete_response.lease_id}") + lease_path = f"/var/credentials-fetcher/krbdir/{lease_id}" + print(lease_path) + assert not os.path.exists(lease_path) + + print(f"Completed {iter} cycle of creation and deletion") + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/test-scripts/create_domain_joined_AD_accounts.ps1 b/cdk/cdk-domainless-mode/test-scripts/create_domain_joined_AD_accounts.ps1 new file mode 100644 index 00000000..75a9deb8 --- /dev/null +++ b/cdk/cdk-domainless-mode/test-scripts/create_domain_joined_AD_accounts.ps1 @@ -0,0 +1,33 @@ +# Use this script to create new Domain Joined gMSA accounts and add them to +# the AD. This script is run on the Windows Instance with access to Managed AD. + +$username = "admin@CONTOSO.COM" +$password = "Qn:51eJsORJNL@~{HY@?" 
| ConvertTo-SecureString -AsPlainText -Force +$credential = New-Object System.Management.Automation.PSCredential($username, $password) + +$groupAllowedToRetrievePassword = "WebAppAccounts_OU" +$path = "OU=MYOU,OU=Users,OU=contoso,DC=contoso,DC=com" + +for (($i = 1); $i -le 10;$i++) +{ + # Create the gMSA account + $gmsa_account_name = "DJ_WebApp0" + $i + $gmsa_account_with_domain = $gmsa_account_name + "." + $env:USERDNSDOMAIN + $gmsa_account_with_host = "host/" + $gmsa_account_name + $gmsa_account_with_host_and_domain = $gmsa_account_with_host + "." + $env:USERDNSDOMAIN + + try { + New-ADServiceAccount -Name $gmsa_account_name ` + -DnsHostName $gmsa_account_with_domain ` + -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain ` + -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword ` + -Path $path ` + -Credential $credential ` + -Server $env:USERDNSDOMAIN ` + -KerberosEncryptionType AES256 + Write-Output "Created gMSA account: $gmsa_account_name" + } catch { + $string_err = $_ | Out-String + Write-Output "Error while gMSA account creation: " + $string_err + } +} \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/test-scripts/create_domain_joined_kerberos_leases.py b/cdk/cdk-domainless-mode/test-scripts/create_domain_joined_kerberos_leases.py new file mode 100644 index 00000000..3ac57587 --- /dev/null +++ b/cdk/cdk-domainless-mode/test-scripts/create_domain_joined_kerberos_leases.py @@ -0,0 +1,51 @@ +import grpc +import credentialsfetcher_pb2 +import credentialsfetcher_pb2_grpc +import json +import os +''' +Use this script to create and test N leases for N domain-joined gMSA +accounts. This script is run on a linux instance in stand-alone mode. 
+''' +with open('../data.json', 'r') as file: + # Load the JSON data + data = json.load(file) + +def run(): + with grpc.insecure_channel('unix:///var/credentials-fetcher/socket/credentials_fetcher.sock') as channel: + stub = credentialsfetcher_pb2_grpc.CredentialsFetcherServiceStub(channel) + number_of_gmsa_accounts = data["number_of_gmsa_accounts"] + directory_name = data["directory_name"] + netbios_name = data["netbios_name"] + for i in range(1, number_of_gmsa_accounts): + credspec_contents = f"""{{ + "CmsPlugins": ["ActiveDirectory"], + "DomainJoinConfig": {{ + "Sid": "S-1-5-21-2725122404-4129967127-2630707939", + "MachineAccountName": "DJ_WebApp0{i}", + "Guid": "e96e0e09-9305-462f-9e44-8a8179722897", + "DnsTreeName": "{directory_name}", + "DnsName": "{directory_name}", + "NetBiosName": "{netbios_name}" + }}, + "ActiveDirectoryConfig": {{ + "GroupManagedServiceAccounts": [ + {{"Name": "DJ_WebApp0{i}", "Scope": "{directory_name}"}}, + {{"Name": "DJ_WebApp0{i}", "Scope": "{netbios_name}"}} + ] + }} + }}""" + + contents = [credspec_contents] + response = stub.AddKerberosLease( + credentialsfetcher_pb2.CreateKerberosLeaseRequest( + credspec_contents=contents + ) + ) + lease_path = (f"/var/credentials-fetcher/krbdir/" + f"{response.lease_id}/DJ_WebApp0{i}/krb5cc") + assert os.path.exists(lease_path) + print(f"Server response: {response}") + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/test-scripts/create_non_domain_joined_AD_accounts.ps1 b/cdk/cdk-domainless-mode/test-scripts/create_non_domain_joined_AD_accounts.ps1 new file mode 100644 index 00000000..12222187 --- /dev/null +++ b/cdk/cdk-domainless-mode/test-scripts/create_non_domain_joined_AD_accounts.ps1 @@ -0,0 +1,29 @@ +# Use this script to create new Non Domain Joined gMSA accounts and add them to +# the AD. This script is run on the Windows Instance with access to Managed AD. 
+# NOTE: The cdk stack already creates N gmsa accounts where N corresponds to the number_of_gmsa_accounts in data +# .json. Use this script if you would like to create new accounts without deploying/re-deploying the cdk stack + +$username = "admin@CONTOSO.COM" +$password = "Qn:51eJsORJNL@~{HY@?" | ConvertTo-SecureString -AsPlainText -Force +$credential = New-Object System.Management.Automation.PSCredential($username, $password) + +$groupAllowedToRetrievePassword = "WebAppAccounts_OU" +$path = "OU=MYOU,OU=Users,OU=contoso,DC=contoso,DC=com" + +for (($i = 11); $i -le 200; $i++) +{ + # Create the gMSA account + $gmsa_account_name = "WebApp0" + $i + $gmsa_account_with_domain = $gmsa_account_name + ".contoso.com" + $gmsa_account_with_host = "host/" + $gmsa_account_name + $gmsa_account_with_host_and_domain = $gmsa_account_with_host + ".contoso.com" + + try { + #New-ADServiceAccount -Name serviceuser1 -Path "OU=MYOU1,OU=Users,OU=ActiveDirectory,DC=contoso,DC=com" -Credential $credential -DNSHostname "contoso.com" + New-ADServiceAccount -Name $gmsa_account_name -DnsHostName $gmsa_account_with_domain -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword -Path $path -Credential $credential -Server contoso.com + Write-Output "New-ADServiceAccount -Name $gmsa_account_name -DnsHostName $gmsa_account_with_domain -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword -Path $path -Credential $credential -Server contoso.com" + } catch { + $string_err = $_ | Out-String + Write-Output "Error while gMSA account creation and copy credspec to S3 bucket: " + $string_err + } +} \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/test-scripts/create_non_domain_joined_kerberos_leases.py b/cdk/cdk-domainless-mode/test-scripts/create_non_domain_joined_kerberos_leases.py new 
file mode 100644 index 00000000..d0c78d21 --- /dev/null +++ b/cdk/cdk-domainless-mode/test-scripts/create_non_domain_joined_kerberos_leases.py @@ -0,0 +1,64 @@ +import grpc +import credentialsfetcher_pb2 +import credentialsfetcher_pb2_grpc +import json +import os + +''' +Use this script to create and test N leases for N non domain-joined gMSA +accounts. This script is run on a linux instance in stand-alone mode. +''' + +with open('../data.json', 'r') as file: + # Load the JSON data + data = json.load(file) + +def run(): + with grpc.insecure_channel('unix:///var/credentials-fetcher/socket/credentials_fetcher.sock') as channel: + number_of_gmsa_accounts = data["number_of_gmsa_accounts"] + directory_name = data["directory_name"] + netbios_name = data["netbios_name"] + username = data["username"] + password = data["password"] + stub = credentialsfetcher_pb2_grpc.CredentialsFetcherServiceStub(channel) + for i in range(1, number_of_gmsa_accounts): + credspec_contents = f"""{{ + "CmsPlugins": ["ActiveDirectory"], + "DomainJoinConfig": {{ + "Sid": "S-1-5-21-2725122404-4129967127-2630707939", + "MachineAccountName": "WebApp0{i}", + "Guid": "e96e0e09-9305-462f-9e44-8a8179722897", + "DnsTreeName": "{directory_name}", + "DnsName": "{directory_name}", + "NetBiosName": "{netbios_name}" + }}, + "ActiveDirectoryConfig": {{ + "GroupManagedServiceAccounts": [ + {{"Name": "WebApp0{i}", "Scope": "{directory_name}"}}, + {{"Name": "WebApp0{i}", "Scope": "{netbios_name}"}} + ], + "HostAccountConfig": {{ + "PortableCcgVersion": "1", + "PluginGUID": "{{GDMA0342-266A-4D1P-831J-20990E82944F}}", + "PluginInput": {{ + "CredentialArn": "aws/directoryservice/contoso/gmsa" + }} + }} + }} + }}""" + contents = [credspec_contents] + response = stub.AddNonDomainJoinedKerberosLease( + credentialsfetcher_pb2.CreateNonDomainJoinedKerberosLeaseRequest( + credspec_contents=contents, + username=username, + password=password, + domain=directory_name + ) + ) + lease_path = 
(f"/var/credentials-fetcher/krbdir/" + f"{response.lease_id}/WebApp0{i}/krb5cc") + assert os.path.exists(lease_path) + print(f"Server response: {response}") + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/tests/__init__.py b/cdk/cdk-domainless-mode/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cdk/cdk-domainless-mode/tests/copy_credspecs_and_create_task_defs.py b/cdk/cdk-domainless-mode/tests/copy_credspecs_and_create_task_defs.py new file mode 100644 index 00000000..dabe0a96 --- /dev/null +++ b/cdk/cdk-domainless-mode/tests/copy_credspecs_and_create_task_defs.py @@ -0,0 +1,113 @@ +import boto3 +import json +import os + +def setup_aws_session(aws_profile_name): + boto3.setup_default_session(profile_name=aws_profile_name) + +def get_ecs_task_execution_role_arn(): + iam_client = boto3.client('iam') + list_roles = iam_client.list_roles(MaxItems=1000) + for role in list_roles['Roles']: + if 'CredentialsFetcher-ECSTaskExecutionRolegMSA' == role['RoleName']: + return role['Arn'] + return None + +def get_task_definition_template(ecs_client, task_definition_template_name): + response = ecs_client.list_task_definitions() + if 'taskDefinitionArns' in response: + for arn in response['taskDefinitionArns']: + if task_definition_template_name in arn: + return ecs_client.describe_task_definition(taskDefinition=arn) + print(f"No task definitions found matching '{task_definition_template_name}'") + return None + +def get_ecs_cluster_info(ecs_client, ecs_cluster_name): + ecs_clusters = ecs_client.list_clusters() + for cluster_arn in ecs_clusters['clusterArns']: + cluster_name = cluster_arn.split('/')[1] + if cluster_name == ecs_cluster_name: + ecs_cluster_instance_arn = ecs_client.list_container_instances(cluster=cluster_arn)['containerInstanceArns'][0] + return cluster_arn, ecs_cluster_instance_arn + return None, None + +def create_credspec(directory_name, netbios_name, gmsa_name, 
gmsa_secret_arn): + credspec_template = """ + { + "CmsPlugins": ["ActiveDirectory"], + "DomainJoinConfig": { + "Sid": "S-1-5-21-2421564706-1737585382-3854682907", + "MachineAccountName": "GMSA_NAME", + "Guid": "6a91814c-e151-4fb0-96f0-f517566fc883", + "DnsTreeName": "DOMAINNAME", + "DnsName": "DOMAINNAME", + "NetBiosName": "NETBIOS_NAME" + }, + "ActiveDirectoryConfig": { + "GroupManagedServiceAccounts": [ + { + "Name": "GMSA_NAME", + "Scope": "DOMAINNAME" + }, + { + "Name": "GMSA_NAME", + "Scope": "NETBIOS_NAME" + } + ], + "HostAccountConfig": { + "PortableCcgVersion": "1", + "PluginGUID": "{859E1386-BDB4-49E8-85C7-3070B13920E1}", + "PluginInput": { + "CredentialArn": "GMSA_SECRET_ARN" + } + } + } + } + """ + credspec_template = credspec_template.replace("DOMAINNAME", directory_name) + credspec_template = credspec_template.replace("NETBIOS_NAME", netbios_name) + credspec_template = credspec_template.replace("GMSA_NAME", gmsa_name) + credspec_template = credspec_template.replace("GMSA_SECRET_ARN", gmsa_secret_arn) + return json.loads(credspec_template) + +def upload_credspec_to_s3(s3_client, s3_bucket, gmsa_name, credspec): + s3_key = f"{gmsa_name}_credspec.json" + try: + s3_client.put_object(Body=json.dumps(credspec), Bucket=s3_bucket, Key=s3_key) + bucket_location = s3_client.get_bucket_location(Bucket=s3_bucket) + bucket_arn = f"arn:aws:s3:::{s3_bucket}" + return bucket_arn, s3_key + except Exception as e: + print(f"Error uploading credspec to S3: {e}") + return None, None + +def modify_task_definition(task_definition, ecs_cluster_arn, bucket_arn, s3_key): + task_definition = task_definition["taskDefinition"] + task_definition["compatibilities"].append("FARGATE") + + for container_def in task_definition['containerDefinitions']: + credspec = container_def['credentialSpecs'] + credspec = [d for d in credspec if 'credentialspecdomainless' not in d] + credspec.append(f"credentialspecdomainless:{bucket_arn}/{s3_key}") + container_def['credentialSpecs'] = credspec + + 
attributes = task_definition['requiresAttributes'] + attributes.append({ + "name": "ecs.capability.gmsa-domainless", + "targetId": ecs_cluster_arn + }) + + return task_definition + +def register_new_task_definition(ecs_client, task_definition, family, ecs_task_execution_role_arn): + return ecs_client.register_task_definition( + family=family, + taskRoleArn=ecs_task_execution_role_arn, + executionRoleArn=ecs_task_execution_role_arn, + networkMode=task_definition['networkMode'], + containerDefinitions=task_definition['containerDefinitions'], + requiresCompatibilities=["EC2", "FARGATE"], + runtimePlatform={'cpuArchitecture': 'X86_64', 'operatingSystemFamily': 'LINUX'}, + cpu=task_definition['cpu'], + memory=task_definition['memory'] + ) diff --git a/cdk/cdk-domainless-mode/tests/create_secrets.py b/cdk/cdk-domainless-mode/tests/create_secrets.py new file mode 100644 index 00000000..edc4ea6f --- /dev/null +++ b/cdk/cdk-domainless-mode/tests/create_secrets.py @@ -0,0 +1,36 @@ +import boto3 +import json +from parse_data_from_json import (number_of_gmsa_accounts, netbios_name, + username, password, directory_name) + +def create_secrets(): + # Initialize the AWS Secrets Manager client + client = boto3.client('secretsmanager') + + # Base path for the secrets + base_path = "aws/directoryservice/contoso/gmsa" + + for i in range(1, number_of_gmsa_accounts + 1): + # Create the secret name + secret_name = f"{base_path}/WebApp0{i}" + + # Create the secret value + secret_value = { + "username": username, + "password": password, + "domainName": directory_name, + "distinguishedName": f"CN=WebApp0{i},OU=MYOU,OU=Users,OU={netbios_name},DC={netbios_name},DC=com" + } + + try: + # Create the secret + response = client.create_secret( + Name=secret_name, + Description=f"Secret for WebApp0{i}", + SecretString=json.dumps(secret_value) + ) + print(f"Created secret: {secret_name}") + except client.exceptions.ResourceExistsException: + print(f"Secret already exists: {secret_name}") + except 
Exception as e: + print(f"Error creating secret {secret_name}: {str(e)}") \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/tests/delete_secrets.py b/cdk/cdk-domainless-mode/tests/delete_secrets.py new file mode 100644 index 00000000..73d88bec --- /dev/null +++ b/cdk/cdk-domainless-mode/tests/delete_secrets.py @@ -0,0 +1,24 @@ +import boto3 +from parse_data_from_json import number_of_gmsa_accounts +def delete_secrets(): + # Initialize the AWS Secrets Manager client + client = boto3.client('secretsmanager') + + # Base path for the secrets + base_path = "aws/directoryservice/contoso/gmsa" + + for i in range(1, number_of_gmsa_accounts + 1): + # Create the secret name + secret_name = f"{base_path}/WebApp0{i}" + + try: + # Delete the secret + response = client.delete_secret( + SecretId=secret_name, + ForceDeleteWithoutRecovery=True + ) + print(f"Deleted secret: {secret_name}") + except client.exceptions.ResourceNotFoundException: + print(f"Secret not found: {secret_name}") + except Exception as e: + print(f"Error deleting secret {secret_name}: {str(e)}") diff --git a/cdk/cdk-domainless-mode/tests/gmsa.ps1 b/cdk/cdk-domainless-mode/tests/gmsa.ps1 new file mode 100644 index 00000000..d5be8166 --- /dev/null +++ b/cdk/cdk-domainless-mode/tests/gmsa.ps1 @@ -0,0 +1,153 @@ + +# This script does the following: +# 1) Install/Update SSM agent - without this the domain-join can fail +# 2) Create a new OU +# 3) Create a new security group +# 4) Create a new standard user account, this account's username and password needs to be stored in a secret store like AWS secrets manager. 
+# 5) Add members to the security group that is allowed to retrieve gMSA password +# 6) Create gMSA accounts with PrincipalsAllowedToRetrievePassword set to the security group created in 4) + +# 1) Install SSM agent +function Test-SSMAgentUpdate { + $ssm = Get-Service -Name "AmazonSSMAgent" -ErrorAction SilentlyContinue + if (-not $ssm) { return $false } + # Add additional version checking logic if needed + return $true +} + +# To install the AD module on Windows Server, run Install-WindowsFeature RSAT-AD-PowerShell +# To install the AD module on Windows 10 version 1809 or later, run Add-WindowsCapability -Online -Name 'Rsat.ActiveDirectory.DS-LDS.Tools~~~~0.0.1.0' +# To install the AD module on older versions of Windows 10, see https://aka.ms/rsat +try { +# 1) Check and Update SSM agent if needed + if (-not (Test-SSMAgentUpdate)) { + Write-Output "Updating SSM agent..." + [System.Net.ServicePointManager]::SecurityProtocol = 'TLS12' + $progressPreference = 'silentlyContinue' + Invoke-WebRequest https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/windows_amd64/AmazonSSMAgentSetup.exe -OutFile $env:USERPROFILE\Desktop\SSMAgent_latest.exe + Start-Process -FilePath $env:USERPROFILE\Desktop\SSMAgent_latest.exe -ArgumentList "/S" + } + +# Check if AD tools are installed + if (-not (Get-WindowsFeature -Name "RSAT-AD-Tools").Installed) { + Write-Output "Installing Active Directory management tools..." 
+ Install-WindowsFeature -Name "RSAT-AD-Tools" -IncludeAllSubFeature + Install-WindowsFeature RSAT-AD-PowerShell + Install-Module CredentialSpec -Force + Install-Module -Name SqlServer -AllowClobber -Force + } + + $username = "admin@DOMAINNAME" + $password = "INPUTPASSWORD" | ConvertTo-SecureString -AsPlainText -Force + $credential = New-Object System.Management.Automation.PSCredential($username, $password) + $groupAllowedToRetrievePassword = "WebAppAccounts_OU" + # This is the basedn path that needs to be in secrets manager as "distinguishedName" : "OU=MYOU,OU=Users,OU=ActiveDirectory,DC=contoso,DC=com" + $path = "OU=MYOU,OU=Users,OU=contoso,DC=NETBIOS_NAME,DC=com" + + # 2) Create OU if it doesn't exist + if (-not (Get-ADOrganizationalUnit -Filter "Name -eq 'MYOU'" -ErrorAction SilentlyContinue)) { + New-ADOrganizationalUnit -Name "MYOU" -Path "OU=Users,OU=contoso,DC=NETBIOS_NAME,DC=com" -Credential $credential + } + + # 3) Create security group if it doesn't exist + if (-not (Get-ADGroup -Filter "SamAccountName -eq '$groupAllowedToRetrievePassword'" -ErrorAction SilentlyContinue)) { + New-ADGroup -Name "WebApp Authorized Accounts in OU" -SamAccountName $groupAllowedToRetrievePassword -Credential $credential -GroupScope DomainLocal -Server DOMAINNAME + } + + # 4) Create standard user if it doesn't exist + if (-not (Get-ADUser -Filter "SamAccountName -eq 'StandardUser01'" -ErrorAction SilentlyContinue)) { + New-ADUser -Name "StandardUser01" -AccountPassword (ConvertTo-SecureString -AsPlainText "********" -Force) -Enabled 1 -Credential $credential -Path $path -Server DOMAINNAME + } + + # 5) Add members to security group if not already members + $group = Get-ADGroup $groupAllowedToRetrievePassword + $members = Get-ADGroupMember $group | Select-Object -ExpandProperty SamAccountName + + foreach ($member in @("StandardUser01", "admin")) { + if ($member -notin $members) { + Add-ADGroupMember -Identity $groupAllowedToRetrievePassword -Members $member -Credential 
$credential -Server DOMAINNAME + } + } + + # 6) Create gMSA accounts if they don't exist + for (($i = 1); $i -le $NUMBER_OF_GMSA_ACCOUNTS; $i++) { + $gmsa_account_name = "WebApp0" + $i + $gmsa_account_with_domain = $gmsa_account_name + ".DOMAINNAME" + $gmsa_account_with_host = "host/" + $gmsa_account_name + $gmsa_account_with_host_and_domain = $gmsa_account_with_host + ".DOMAINNAME" + + if (-not (Get-ADServiceAccount -Filter "Name -eq '$gmsa_account_name'" -ErrorAction SilentlyContinue)) { + New-ADServiceAccount -Name $gmsa_account_name ` + -DnsHostName $gmsa_account_with_domain ` + -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain ` + -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword ` + -Path $path ` + -Credential $credential ` + -Server DOMAINNAME + } + } + + # SQL Server Configuration + $sqlInstance = $env:computername + + # Create firewall rules if they don't exist + $firewallRules = Get-NetFirewallRule | Select-Object -ExpandProperty DisplayName + + if ("SQLServer default instance" -notin $firewallRules) { + New-NetFirewallRule -DisplayName "SQLServer default instance" -Direction Inbound -LocalPort 1433 -Protocol TCP -Action Allow + } + if ("SQLServer Browser service" -notin $firewallRules) { + New-NetFirewallRule -DisplayName "SQLServer Browser service" -Direction Inbound -LocalPort 1434 -Protocol UDP -Action Allow + } + if ("AllowRDP" -notin $firewallRules) { + New-NetFirewallRule -DisplayName "AllowRDP" -Direction Inbound -Protocol TCP -LocalPort 3389 -Action Allow + } + if ("AllowSQLServer" -notin $firewallRules) { + New-NetFirewallRule -DisplayName "AllowSQLServer" -Direction Inbound -Protocol TCP -LocalPort 1433 -Action Allow + } + + # SQL Database creation and configuration + $connectionString0 = "Server=$sqlInstance;Integrated Security=True;" + $connectionString1 = "Server=$sqlInstance;Database=EmployeesDB;Integrated Security=True;" + + # Check if database exists + $dbExists = Invoke-Sqlcmd 
-ConnectionString $connectionString0 -Query "SELECT name FROM sys.databases WHERE name = 'EmployeesDB'" + + if (-not $dbExists) { + Invoke-Sqlcmd -ConnectionString $connectionString0 -Query "CREATE DATABASE EmployeesDB" + + $query = @" +CREATE TABLE dbo.EmployeesTable ( + EmpID INT IDENTITY(1,1) PRIMARY KEY, + EmpName VARCHAR(50) NOT NULL, + Designation VARCHAR(50) NOT NULL, + Department VARCHAR(50) NOT NULL, + JoiningDate DATETIME NOT NULL +); + +INSERT INTO EmployeesDB.dbo.EmployeesTable (EmpName, Designation, Department, JoiningDate) +VALUES + ('CHIN YEN', 'LAB ASSISTANT', 'LAB', '2022-03-05 03:57:09.967'), + ('MIKE PEARL', 'SENIOR ACCOUNTANT', 'ACCOUNTS', '2022-03-05 03:57:09.967'), + ('GREEN FIELD', 'ACCOUNTANT', 'ACCOUNTS', '2022-03-05 03:57:09.967'), + ('DEWANE PAUL', 'PROGRAMMER', 'IT', '2022-03-05 03:57:09.967'), + ('MATTS', 'SR. PROGRAMMER', 'IT', '2022-03-05 03:57:09.967'), + ('PLANK OTO', 'ACCOUNTANT', 'ACCOUNTS', '2022-03-05 03:57:09.967'); +alter authorization on database::[EmployeesDB] to [WebApp01$] +"@ + + Invoke-Sqlcmd -ConnectionString $connectionString1 -Query $query + } + + # Check if login exists before creating + $loginExists = Invoke-Sqlcmd -ConnectionString $connectionString0 -Query "SELECT name FROM sys.server_principals WHERE name = 'NETBIOS_NAME\webapp01$'" + + if (-not $loginExists) { + $createLoginQuery = "CREATE LOGIN [NETBIOS_NAME\webapp01$] FROM WINDOWS WITH DEFAULT_DATABASE = [master], DEFAULT_LANGUAGE = [us_english]; EXEC sp_addrolemember 'db_owner', 'NETBIOS_NAME\webapp01$';" + Invoke-Sqlcmd -ConnectionString $connectionString0 -Query $createLoginQuery + } + +} catch { + Write-Error "An error occurred: $_" + throw +} \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/tests/parse_data_from_json.py b/cdk/cdk-domainless-mode/tests/parse_data_from_json.py new file mode 100644 index 00000000..e2474690 --- /dev/null +++ b/cdk/cdk-domainless-mode/tests/parse_data_from_json.py @@ -0,0 +1,33 @@ +import json +import os + +def 
import json
import os

# Resolve data.json relative to this file so importing this module works
# regardless of the caller's current working directory (the original
# '../data.json' only worked when run from inside the tests/ directory).
_DATA_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data.json')


def load_data():
    """Load and return the shared test configuration dictionary from data.json."""
    with open(_DATA_FILE, 'r') as file:
        return json.load(file)


def get_value(key):
    """Return os.environ[key] when set, otherwise data[key.lower()].

    Lets CI override individual settings (e.g. S3_PREFIX) via the environment.
    """
    return os.environ.get(key, data.get(key.lower()))


data = load_data()

number_of_gmsa_accounts = data["number_of_gmsa_accounts"]
netbios_name = data["netbios_name"]
directory_name = data["directory_name"]
instance_name = data["windows_instance_tag"]
region = data["aws_region"]
stack_name = data["stack_name"]
cluster_name = data["cluster_name"]
vpc_name = data["vpc_name"]
task_definition_template_name = data["task_definition_template_name"]
repository_name = data["ecr_repo_name"]
tag = data["docker_image_tag"]
# Fall back to the "XXX" placeholder when S3_PREFIX is missing entirely, so the
# check below prints the friendly message instead of raising TypeError on
# None + str concatenation.
bucket_name = (get_value("S3_PREFIX") or "XXX") + data["s3_bucket_suffix"]
aws_profile_name = data["aws_profile_name"]
username = data["username"]
password = data["password"]
windows_instance_tag = data["windows_instance_tag"]
domain_admin_password = data["domain_admin_password"]

# "XXX" is the placeholder shipped in data.json; refuse to run with it.
if "XXX" in bucket_name:
    print("S3_PREFIX is not setup correctly, please set it and retry")
    exit(1)
def check_s3_bucket_exists():
    """Return True when the configured S3 bucket exists (even without access),
    False when it does not exist or the check fails for another reason.

    Uses HeadBucket: a 403 proves the bucket exists without read access;
    a 404 means it is absent.
    """
    s3_client = boto3.client('s3')
    try:
        s3_client.head_bucket(Bucket=bucket_name)
        print(f"Bucket {bucket_name} exists and you have access to it.")
        return True
    except ClientError as e:
        # Compare error codes as strings: HeadBucket usually yields '403'/'404',
        # but codes such as 'NoSuchBucket' or 'AccessDenied' are not numeric and
        # the original int(...) cast raised ValueError, masking the real error.
        error_code = e.response.get('Error', {}).get('Code', '')
        if error_code in ('403', 'AccessDenied'):
            print(f"Bucket {bucket_name} exists, but you don't have permission to access it.")
            return True
        elif error_code in ('404', 'NoSuchBucket'):
            print(f"Bucket {bucket_name} does not exist.")
            return False
        else:
            print(f"An error occurred: {e}")
            return False
def create_s3_bucket():
    """Create the configured S3 bucket (module-level ``bucket_name``).

    When the module-level ``region`` is None the bucket lands in the S3
    default region (us-east-1); otherwise a LocationConstraint is supplied.

    :return: True if the bucket was created, False on failure
    """
    try:
        if region is None:
            boto3.client('s3').create_bucket(Bucket=bucket_name)
        else:
            regional_client = boto3.client('s3', region_name=region)
            regional_client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': region},
            )
    except ClientError as e:
        print(f"Couldn't create bucket {bucket_name}.")
        print(f"Error: {e}")
        return False

    print(f"Bucket {bucket_name} created successfully.")
    return True
def create_and_register_tasks():
    """Create a credspec per gMSA account, upload it to S3, and register a
    per-account ECS task definition derived from the shared template.

    Fix over the original: the ``if not all([...])`` resource guard was
    duplicated verbatim; it now appears once.
    """
    setup_aws_session(aws_profile_name)
    ecs_task_execution_role_arn = get_ecs_task_execution_role_arn()
    task_definition_template = get_task_definition_template(ecs_client, task_definition_template_name)
    # NOTE(review): cluster name is hardcoded here while other helpers use the
    # cluster_name value loaded from data.json -- confirm these are meant to differ.
    ecs_cluster_arn, ecs_cluster_instance_arn = get_ecs_cluster_info(
        ecs_client, "Credentials-fetcher-ecs-load-test")

    if not all([ecs_task_execution_role_arn, task_definition_template, ecs_cluster_arn]):
        print("Failed to retrieve necessary resources.")
        return

    for i in range(1, number_of_gmsa_accounts + 1):
        gmsa_name = f"WebApp0{i}"
        secret_id = f"aws/directoryservice/{netbios_name}/gmsa/{gmsa_name}"
        gmsa_secret_arn = secrets_manager_client.get_secret_value(SecretId=secret_id)['ARN']

        credspec = create_credspec(directory_name, netbios_name, gmsa_name, gmsa_secret_arn)
        bucket_arn, s3_key = upload_credspec_to_s3(s3_client, bucket_name, gmsa_name, credspec)

        if not bucket_arn:
            print(f"Failed to upload credspec for {gmsa_name}")
            continue

        modified_task_definition = modify_task_definition(
            task_definition_template, ecs_cluster_arn, bucket_arn, s3_key)
        family = f"{task_definition_template['taskDefinition']['family']}-{i}"

        response = register_new_task_definition(
            ecs_client, modified_task_definition, family, ecs_task_execution_role_arn)
        print(f"Registered new task definition for {gmsa_name}: {response['taskDefinition']['taskDefinitionArn']}")
def empty_s3_bucket():
    """Remove every object -- and, when versioning is enabled, every object
    version -- from the configured S3 bucket (module-level ``bucket_name``)."""
    bucket = boto3.resource('s3').Bucket(bucket_name)

    try:
        print(f"Deleting all objects in bucket '{bucket_name}'...")
        bucket.objects.all().delete()

        print(f"Deleting all object versions in bucket '{bucket_name}'...")
        bucket.object_versions.all().delete()

        print(f"Bucket '{bucket_name}' has been emptied successfully.")
    except ClientError as e:
        print(f"An error occurred while emptying the bucket: {e}")
def get_running_task_definitions():
    """Collect the task-definition ARNs of every RUNNING task across all ECS
    clusters.

    Returns a set of ARNs, or None when the ECS API calls fail.
    """
    running_task_defs = set()

    try:
        for cluster in ecs_client.list_clusters()['clusterArns']:
            task_arns = ecs_client.list_tasks(
                cluster=cluster,
                desiredStatus='RUNNING',
            )['taskArns']

            if not task_arns:
                continue

            described = ecs_client.describe_tasks(
                cluster=cluster,
                tasks=task_arns,
            )['tasks']
            running_task_defs.update(t['taskDefinitionArn'] for t in described)
    except ClientError as e:
        print(f"Error getting running tasks: {e}")
        return None

    return running_task_defs
def delete_unused_task_definitions():
    """Deregister task definitions that are not currently running and whose
    containers use credentialSpecs; print a deleted/skipped summary.

    Fixes over the original: each definition is deregistered at most once (the
    old per-container loop could call deregister repeatedly for multi-container
    definitions and over-counted skips), ListTaskDefinitions is paginated, and
    an unused ListTaskDefinitionFamilies call plus a debug print of the full
    task definition were removed.
    """
    try:
        running_task_defs = get_running_task_definitions()
        if running_task_defs is None:
            return

        deleted_count = 0
        skipped_count = 0

        paginator = ecs_client.get_paginator('list_task_definitions')
        for page in paginator.paginate():
            for arn in page.get('taskDefinitionArns', []):
                # Never touch definitions backing a running task.
                if arn in running_task_defs:
                    print(f"Skipping running task definition: {arn}")
                    skipped_count += 1
                    continue

                task_def = ecs_client.describe_task_definition(
                    taskDefinition=arn)['taskDefinition']

                has_cred_specs = any(
                    'credentialSpecs' in container
                    for container in task_def['containerDefinitions'])

                if not has_cred_specs:
                    print(f"Skipping task definition without credentialSpecs: {arn}")
                    skipped_count += 1
                    continue

                try:
                    ecs_client.deregister_task_definition(taskDefinition=arn)
                    print(f"Deleted task definition: {arn}")
                    deleted_count += 1
                except ClientError as e:
                    print(f"Error deleting task definition {arn}: {e}")

        print(f"\nSummary:")
        print(f"Deleted task definitions: {deleted_count}")
        print(f"Skipped task definitions: {skipped_count}")

    except ClientError as e:
        print(f"Error: {e}")
def run_e2e_test():
    """Drive the full E2E flow: prepare the S3 bucket, create secrets,
    register task definitions, open security groups, run the tasks, and
    finally execute the SQL validation test."""
    spacer = "\n" * 3
    banner = "###################################################"

    if not check_s3_bucket_exists() and not create_s3_bucket():
        print("s3 bucket was not created properly, exiting...")
        return
    if not is_s3_bucket_empty():
        empty_s3_bucket()
    print("Using s3 bucket: " + bucket_name)
    print("----------S3 bucket created and ready for use-----------------")
    create_secrets()
    print(spacer)
    print("-----------------Secret Creation Complete.-------------------")
    print(spacer)
    create_and_register_tasks()
    print(spacer)
    print("-----------------Created and Registered Tasks.---------------")
    print(spacer)
    print("-----------------Windows instance is Ready--------------------")
    add_security_group_to_instance(directory_name, instance_name)
    print(spacer)
    print("--------Linux instance has necessary Security groups Added----")
    print(spacer)
    update_task_defs_and_run_tasks()
    print(spacer)
    print("--------Task definition updated, ready to run SQL Test--------")
    print("Waiting 15 seconds before running SQL Test")
    print(spacer)
    sleep(15)
    print("Sleep complete. Executing SQL test now.")
    run_sql_test()
    for _ in range(4):
        print(banner)
    print(spacer)
    print("------------E2E Test Successful!!------------------")
    print(spacer)
    for _ in range(4):
        print(banner)
def run_shell_script(instance_id, hostname):
    """Run the SQL validation script on the Linux instance via SSM.

    Restarts credentials-fetcher and ECS, locates the my-ecr-repo:latest
    container, then runs klist plus a SQL query inside it; raises when the
    SSM command does not finish with Status == 'Success'.
    """
    commands = [
        'systemctl restart credentials-fetcher',
        'systemctl restart ecs',
        f'HOSTNAME="{hostname}"',
        'echo "Listing all Docker containers:"',
        'IMAGEID=$(docker ps --format "{{.ID}} {{.Image}}" | grep "my-ecr-repo:latest" | awk \'{print $1}\' | head -n 1)',
        'echo "IMAGEID: $IMAGEID"',
        'if [ -n "$IMAGEID" ]; then',
        '  echo "Container ID: $IMAGEID"',
        '  echo "Running commands inside the container:"',
        '  echo "klist && sqlcmd -S $HOSTNAME.contoso.com -C -Q \'SELECT * FROM employeesdb.dbo.employeestable;\'" | docker exec -i $IMAGEID env PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/mssql-tools/bin bash',
        'else',
        '  echo "No container found with my-ecr-repo:latest"',
        'fi'
    ]
    ssm_client = boto3.client('ssm')

    sent = ssm_client.send_command(
        InstanceIds=[instance_id],
        DocumentName="AWS-RunShellScript",
        Parameters={'commands': commands},
    )
    command_id = sent['Command']['CommandId']

    try:
        ssm_client.get_waiter('command_executed').wait(
            CommandId=command_id,
            InstanceId=instance_id,
            WaiterConfig={'Delay': 30, 'MaxAttempts': 100},
        )
    except Exception as e:
        print(f"Command failed: {commands}")
        print(f"Error: {str(e)}")
        raise

    result = ssm_client.get_command_invocation(
        CommandId=command_id,
        InstanceId=instance_id,
    )

    print(f"Command output:\n{result.get('StandardOutputContent', '')}")

    if result['Status'] == 'Success':
        print(f"Command status: Success")
    else:
        print(f"Command failed with status: {result['Status']}")
        print(f"Error: {result.get('StandardErrorContent', 'No error content available')}")
        raise Exception(f"Command execution failed with status: {result['Status']}")
def get_windows_hostname(instance_id):
    """Return the Windows hostname of the given instance by running 'hostname'
    through SSM (AWS-RunPowerShellScript); raises when the command fails."""
    commands = ['hostname']
    ssm_client = boto3.client('ssm')

    sent = ssm_client.send_command(
        InstanceIds=[instance_id],
        DocumentName="AWS-RunPowerShellScript",
        Parameters={'commands': commands},
    )
    command_id = sent['Command']['CommandId']

    try:
        ssm_client.get_waiter('command_executed').wait(
            CommandId=command_id,
            InstanceId=instance_id,
            WaiterConfig={'Delay': 30, 'MaxAttempts': 50},
        )
    except Exception as e:
        print(f"Command failed: {commands}")
        print(f"Error: {str(e)}")
        raise

    result = ssm_client.get_command_invocation(
        CommandId=command_id,
        InstanceId=instance_id,
    )

    print(f"Command output:\n{result.get('StandardOutputContent', '')}")

    if result['Status'] == 'Success':
        return result['StandardOutputContent'].strip()

    print(f"Command failed with status: {result['Status']}")
    print(f"Error: {result.get('StandardErrorContent', 'No error content available')}")
    raise Exception(f"Command execution failed with status: {result['Status']}")
+# instance_id_windows = get_instance_id_by_name(region, instance_name_windows) +# +# hostname = get_windows_hostname(instance_id_windows) +# run_shell_script(instance_id_linux, hostname) + diff --git a/cdk/cdk-domainless-mode/tests/setup_windows_instance.py b/cdk/cdk-domainless-mode/tests/setup_windows_instance.py new file mode 100644 index 00000000..a9fa62a1 --- /dev/null +++ b/cdk/cdk-domainless-mode/tests/setup_windows_instance.py @@ -0,0 +1,108 @@ +import boto3 +import os +import json +from parse_data_from_json import (domain_admin_password, directory_name, + netbios_name, number_of_gmsa_accounts, + bucket_name, region, instance_name) + +""" +This script sets up the EC2 Windows instance. + +This script performs the following operations: + +1. Retrieves an EC2 instance ID based on a tag name. +2. Reads a PowerShell script ('gmsa.ps1') from the directory. +3. Executes the PowerShell script on the specified EC2 instance using AWS Systems Manager (SSM). +4. The Powershell script does the following: + - Installs AD management tools and related PowerShell modules. + - Creates a new Organizational Unit (OU) in Active Directory. + - Creates a new security group for gMSA account management. + - Creates a new standard user account. + - Adds members to the security group for gMSA password retrieval. + - Creates multiple Group Managed Service Accounts (gMSA) based on a specified count. + - Configures SQL Server firewall rules: + - Allows inbound traffic on ports 1433 (TCP) and 1434 (UDP). + - Sets up rules for RDP and SQL Server access. + - Creates a new SQL Server database named 'EmployeesDB'. + - Creates a table 'EmployeesTable' and inserts sample data. + - Alters the database authorization to allow access from a gMSA account. + +It's designed to automate the process of running a PowerShell script on a Windows EC2 instance, +for configuring Group Managed Service Accounts (gMSA) and related AWS resources. 
def run_powershell_script(instance_id, script_path):
    """Render gmsa.ps1 (substituting config placeholders) and execute it on the
    Windows instance via SSM; raises when the command fails.

    Security fix: the rendered script contains the substituted domain admin
    password, so failure paths no longer print the script body to logs -- they
    print the script path instead.
    """
    with open(script_path, 'r') as file:
        script_content = file.read()

    # Substitute deployment-specific placeholders baked into the .ps1 template.
    script_content = script_content.replace("INPUTPASSWORD",
                                            domain_admin_password)
    script_content = script_content.replace("DOMAINNAME", directory_name)
    script_content = script_content.replace("NETBIOS_NAME", netbios_name)
    script_content = script_content.replace("NUMBER_OF_GMSA_ACCOUNTS", str(number_of_gmsa_accounts))
    script_content = script_content.replace("BUCKET_NAME", bucket_name)

    ssm = boto3.client('ssm')

    response = ssm.send_command(
        InstanceIds=[instance_id],
        DocumentName="AWS-RunPowerShellScript",
        Parameters={'commands': [script_content]}
    )

    command_id = response['Command']['CommandId']

    waiter = ssm.get_waiter('command_executed')
    try:
        waiter.wait(
            CommandId=command_id,
            InstanceId=instance_id,
            WaiterConfig={
                'Delay': 30,
                'MaxAttempts': 50
            }
        )
    except Exception as e:
        # Do NOT echo script_content here: it embeds the admin password.
        print(f"Command failed while running script: {script_path}")
        print(f"Error: {str(e)}")
        raise

    output = ssm.get_command_invocation(
        CommandId=command_id,
        InstanceId=instance_id
    )

    print(f"Command output:\n{output.get('StandardOutputContent', '')}")

    if output['Status'] == 'Success':
        print(f"Command status: Success")
    else:
        print(f"Command failed while running script: {script_path}")
        print(f"Error: {output['StandardErrorContent']}")
        raise Exception(f"Command execution failed for script: {script_path}")
b/cdk/cdk-domainless-mode/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cdk/cdk-domainless-mode/tests/unit/test_cdk_stack.py b/cdk/cdk-domainless-mode/tests/unit/test_cdk_stack.py deleted file mode 100644 index b266920e..00000000 --- a/cdk/cdk-domainless-mode/tests/unit/test_cdk_stack.py +++ /dev/null @@ -1,15 +0,0 @@ -import aws_cdk as core -import aws_cdk.assertions as assertions - -from cdk.cdk_stack import CdkStack - -# example tests. To run these tests, uncomment this file along with the example -# resource in cdk/cdk_stack.py -def test_sqs_queue_created(): - app = core.App() - stack = CdkStack(app, "cdk") - template = assertions.Template.from_stack(stack) - -# template.has_resource_properties("AWS::SQS::Queue", { -# "VisibilityTimeout": 300 -# }) diff --git a/cdk/cdk-domainless-mode/tests/update_inbound_rules.py b/cdk/cdk-domainless-mode/tests/update_inbound_rules.py new file mode 100644 index 00000000..52cca7d2 --- /dev/null +++ b/cdk/cdk-domainless-mode/tests/update_inbound_rules.py @@ -0,0 +1,100 @@ +import boto3 +import json +import os +from parse_data_from_json import stack_name, directory_name + +""" +This script executes the security group modification, enabling communication between the EC2 instance and the Active Directory. + +This script performs the following operations: + +Loads configuration from a 'data.json' file, including the Active Directory domain name and EC2 instance identifier. + +Defines a function 'add_security_group_to_instance' that: +a. Retrieves the AWS Directory Service details for the specified directory. +b. Identifies the security group associated with the directory. +c. Adds an inbound rule to the instance's security group, allowing all traffic from the directory's security group. 
def add_security_group_to_instance(directory_name, instance_name):
    """Allow all inbound traffic from the AWS Directory Service security group
    on the named EC2 instance's security group (idempotent).

    Raises ValueError when the directory or a running instance with the given
    Name tag cannot be found, or when the instance has no security groups.
    """
    ds = boto3.client('ds')
    ec2 = boto3.client('ec2')

    directories = ds.describe_directories()['DirectoryDescriptions']
    directory = next((d for d in directories if d['Name'] == directory_name), None)

    if not directory:
        raise ValueError(f"Directory '{directory_name}' not found")

    directory_id = directory['DirectoryId']
    print(f"Found directory ID: {directory_id}")

    directory_details = ds.describe_directories(DirectoryIds=[directory_id])['DirectoryDescriptions'][0]
    security_group_id = directory_details['VpcSettings']['SecurityGroupId']

    response = ec2.describe_instances(
        Filters=[
            {
                'Name': 'tag:Name',
                'Values': [instance_name]
            },
            {
                'Name': 'instance-state-name',
                'Values': ['running']
            }
        ]
    )

    if not response['Reservations']:
        raise ValueError(f"No instances found with tag:Name '{instance_name}'")

    instances = response['Reservations'][0]['Instances']
    if not instances:
        raise ValueError(f"No instances found in the reservation")

    instance = instances[0]

    if not instance.get('SecurityGroups'):
        raise ValueError(f"No security groups found for the instance")

    instance_sg_id = instance['SecurityGroups'][0]['GroupId']

    # Check whether the all-traffic rule referencing the directory SG exists.
    existing_rules = ec2.describe_security_group_rules(
        Filters=[{'Name': 'group-id', 'Values': [instance_sg_id]}]
    )['SecurityGroupRules']

    # Use .get() with -1 defaults: all-protocol ('-1') rules may omit
    # FromPort/ToPort entirely, and the original rule['FromPort'] lookup
    # raised KeyError on such rules.
    rule_exists = any(
        rule.get('IpProtocol') == '-1' and
        rule.get('FromPort', -1) == -1 and
        rule.get('ToPort', -1) == -1 and
        rule.get('ReferencedGroupInfo', {}).get('GroupId') == security_group_id
        for rule in existing_rules
    )

    if rule_exists:
        print(f"Rule already exists in security group {instance_sg_id}")
        return

    # Add the new inbound rule to the security group
    try:
        ec2.authorize_security_group_ingress(
            GroupId=instance_sg_id,
            IpPermissions=[
                {
                    'IpProtocol': '-1',  # All traffic
                    'FromPort': -1,      # All ports
                    'ToPort': -1,        # All ports
                    'UserIdGroupPairs': [{'GroupId': security_group_id}]
                }
            ]
        )
        print(f"Successfully added inbound rule to security group {instance_sg_id}")
    except Exception as e:
        print(f"An error occurred: {str(e)}")
def update_task_definition_image(task_definition_family, repository_name, tag, region):
    """Point the family's 'MyContainer' image at <ECR repo URI>:<tag> and
    register a new task-definition revision; returns the new revision's ARN.

    Fix over the original: taskRoleArn is optional in DescribeTaskDefinition
    output, and the unconditional task_definition['taskRoleArn'] lookup raised
    KeyError for definitions without a task role.
    """
    ecs_client = boto3.client('ecs', region_name=region)
    ecr_client = boto3.client('ecr', region_name=region)
    response = ecr_client.describe_repositories(repositoryNames=[repository_name])
    repository_uri = response['repositories'][0]['repositoryUri']
    image_uri = f"{repository_uri}:{tag}"

    # Get the current task definition
    response = ecs_client.describe_task_definition(taskDefinition=task_definition_family)
    task_definition = response['taskDefinition']

    # Update the container definition with the new image URI
    container_definitions = task_definition['containerDefinitions']
    for container in container_definitions:
        if container['name'] == 'MyContainer':
            container['image'] = image_uri

    # Prepare arguments for register_task_definition
    register_args = {
        'family': task_definition['family'],
        'executionRoleArn': task_definition['executionRoleArn'],
        'networkMode': task_definition['networkMode'],
        'containerDefinitions': container_definitions,
        'volumes': task_definition.get('volumes', []),
        'placementConstraints': task_definition.get('placementConstraints', []),
        'requiresCompatibilities': task_definition['requiresCompatibilities'],
        'cpu': task_definition['cpu'],
        'memory': task_definition['memory'],
    }

    # Only forward optional fields that are actually present and non-empty.
    if 'taskRoleArn' in task_definition:
        register_args['taskRoleArn'] = task_definition['taskRoleArn']
    if task_definition.get('tags'):
        register_args['tags'] = task_definition['tags']

    # Register the new task definition and get the revised ARN
    new_task_definition = ecs_client.register_task_definition(**register_args)

    revised_arn = new_task_definition['taskDefinition']['taskDefinitionArn']

    print(f"New task definition registered for {task_definition_family}")
    print(f"Revised ARN: {revised_arn}")

    return revised_arn
def get_task_definition_families(ecs_client, pattern):
    """Return the distinct task-definition family names whose ARNs contain
    ``pattern``, scanning every page of ListTaskDefinitions.

    The family is the segment between '/' and ':' in the ARN, e.g.
    'arn:aws:ecs:...:task-definition/<family>:<revision>'.
    """
    families = set()
    paginator = ecs_client.get_paginator('list_task_definitions')

    for page in paginator.paginate():
        for arn in page['taskDefinitionArns']:
            if pattern not in arn:
                continue
            families.add(arn.split('/')[1].split(':')[0])

    return list(families)
) + void write_log( const char* message ) { const int max_log_len = 10 * 1024 * 1024; // 10 MB - char buffer[256]; - va_list args; - va_start( args, format ); - vsnprintf( buffer, 255, format, args ); - va_end( args ); - int fd = open( "/var/credentials-fetcher/logging/credentials-fetcher.log", O_RDWR ); struct stat st; if ( fstat( fd, &st ) < 0 ) @@ -123,31 +117,21 @@ class CF_logger struct tm* local_time = localtime( ¤t_time ); char time_buffer[80]; strftime( time_buffer, 80, "%Y-%m-%d %H:%M:%S", local_time ); - fprintf( fp, "%s: ", time_buffer ); - fprintf( fp, format, buffer ); - fprintf( fp, "\n" ); + fprintf( fp, "%s: %s \n", time_buffer, message ); fclose( fp ); } - std::string log_buf = std::string( buffer, strlen( buffer ) ); + std::string log_buf = std::string( message ); log_ring_buffer[log_buffer_count] = log_buf; log_buffer_count = ( log_buffer_count + 1 ) % MAX_LOG_BUFFER_COUNT; close( fd ); } - template void logger( const int level, const char* fmt, Logs... logs ) + void logger( const int level, const char* logs ) { if ( level >= log_level ) { - std::string logFmt = fmt; - for ( int i = 0; logFmt[i] != '\0'; ++i ) - { - if ( logFmt[i] == '\n' ) - { - logFmt[i] = ' '; // Replace '\n' with space - } - } - sd_journal_print( level, logFmt.c_str(), logs... ); - write_log( logFmt.c_str(), logs... ); + sd_journal_print( level, "%s", logs ); + write_log( logs ); } } }; diff --git a/common/util.hpp b/common/util.hpp index 972afa89..5b15dd2f 100644 --- a/common/util.hpp +++ b/common/util.hpp @@ -603,10 +603,11 @@ class Util { std::string cmd; std::pair ldap_search_result; - + // -N: Do not use reverse DNS to canonicalize SASL host name. + // With this flag, ldapsearch uses the IP address directly for identification purposes, rather than trying to resolve it to a hostname. 
cmd = std::string( "ldapsearch -o ldif_wrap=no -LLL -Y GSSAPI -H ldap://" ) + fqdn; cmd += std::string( " -b '" ) + distinguished_name + std::string( "' " ) + search_string; - + cmd += std::string( " -N" ); std::cerr << Util::getCurrentTime() << '\t' << "INFO: " << cmd << std::endl; std::cerr << cmd << std::endl; @@ -960,11 +961,11 @@ class Util // truncate the hostname to the host name size limit defined by microsoft if ( host_name.length() > HOST_NAME_LENGTH_LIMIT ) { - cf_logger.logger( LOG_ERR, - "WARNING: %s:%d hostname exceeds 15 characters," - "this can cause problems in getting kerberos tickets, please reduce " - "hostname length", - __func__, __LINE__ ); + std::string log_message = "WARNING: " + std::string( __func__ ) + " : " + + std::to_string( __LINE__ ) + + " hostname exceeds 15 characters, this can cause problems in " + "getting kerberos tickets, please reduce hostname length"; + cf_logger.logger( LOG_ERR, log_message.c_str() ); host_name = host_name.substr( 0, HOST_NAME_LENGTH_LIMIT ); std::cerr << Util::getCurrentTime() << '\t' << "INFO: hostname exceeds 15 characters this can " diff --git a/configuration/config.h.in b/configuration/config.h.in index e44aa17e..fe006d07 100644 --- a/configuration/config.h.in +++ b/configuration/config.h.in @@ -4,3 +4,12 @@ #cmakedefine CF_TEST_DOMAIN_NAME "@CF_TEST_DOMAIN_NAME@" #cmakedefine CF_TEST_GMSA_ACCOUNT "@CF_TEST_GMSA_ACCOUNT@" #cmakedefine CMAKE_PROJECT_VERSION "@CMAKE_PROJECT_VERSION@" + +// Integration Test constants +#cmakedefine CF_TEST_ACCESS_KEY_ID "@CF_TEST_ACCESS_KEY_ID@" +#cmakedefine CF_TEST_SECRET_ACCESS_KEY "@CF_TEST_SECRET_ACCESS_KEY@" +#cmakedefine CF_TEST_SESSION_TOKEN "@CF_TEST_SESSION_TOKEN@" +#cmakedefine CF_TEST_REGION "@CF_TEST_REGION@" +#cmakedefine CF_TEST_USERNAME "@CF_TEST_USERNAME@" +#cmakedefine CF_TEST_PASSWORD "@CF_TEST_PASSWORD@" +#cmakedefine CF_TEST_CREDSPEC_ARN "@CF_TEST_CREDSPEC_ARN@" diff --git a/daemon/src/daemon.cpp b/daemon/src/daemon.cpp index 04130913..aa9d974d 100644 
--- a/daemon/src/daemon.cpp +++ b/daemon/src/daemon.cpp @@ -1,10 +1,10 @@ #include "daemon.h" +#include "util.hpp" +#include #include #include #include #include -#include -#include "util.hpp" Daemon cf_daemon; @@ -19,7 +19,7 @@ static const char* grpc_thread_name = "grpc_thread"; static void systemd_shutdown_signal_catcher( int signo ) { - printf("Credentials-fetcher shutdown: Caught signo %d\n", signo); + printf( "Credentials-fetcher shutdown: Caught signo %d\n", signo ); cf_daemon.got_systemd_shutdown_signal = 1; } @@ -138,18 +138,20 @@ std::pair create_pthread( void* ( *func )(void*), const char* pthrea return std::make_pair( EXIT_SUCCESS, tinfo ); } -int parse_cred_file_path(const std::string& cred_file_path, std::string& cred_file, std::string& cred_file_lease_id ) +int parse_cred_file_path( const std::string& cred_file_path, std::string& cred_file, + std::string& cred_file_lease_id ) { size_t colon_delim_pos; char path_lease_delimiter = ':'; - if (cred_file_path.empty()) + if ( cred_file_path.empty() ) return EXIT_FAILURE; - colon_delim_pos = cred_file_path.find(path_lease_delimiter); + colon_delim_pos = cred_file_path.find( path_lease_delimiter ); - //If the : delimiter is not found, then assume the cred_file_path is just the path to cred spec file and use the default lease id - if (colon_delim_pos == std::string::npos) + // If the : delimiter is not found, then assume the cred_file_path is just the path to cred spec + // file and use the default lease id + if ( colon_delim_pos == std::string::npos ) { cred_file = cred_file_path; cred_file_lease_id = DEFAULT_CRED_FILE_LEASE_ID; @@ -157,8 +159,8 @@ int parse_cred_file_path(const std::string& cred_file_path, std::string& cred_fi return EXIT_SUCCESS; } - cred_file = cred_file_path.substr(0, colon_delim_pos); - cred_file_lease_id = cred_file_path.substr(colon_delim_pos+1); + cred_file = cred_file_path.substr( 0, colon_delim_pos ); + cred_file_lease_id = cred_file_path.substr( colon_delim_pos + 1 ); return 
EXIT_SUCCESS; } @@ -183,24 +185,27 @@ int main( int argc, const char* argv[] ) std::string log_msg = "Credentials-fetcher daemon has started running"; std::cerr << log_msg << std::endl; - cf_daemon.cf_logger.logger( LOG_ERR, "%s", log_msg.c_str()); - std::cerr << "on request failures check logs located at " + cf_daemon.logging_dir << std::endl;; + cf_daemon.cf_logger.logger( LOG_ERR, log_msg.c_str() ); + std::cerr << "on request failures check logs located at " + cf_daemon.logging_dir << std::endl; + ; - if ( getenv(ENV_CF_CRED_SPEC_FILE) != NULL) + if ( getenv( ENV_CF_CRED_SPEC_FILE ) != NULL ) { - int parseResult = parse_cred_file_path( getenv(ENV_CF_CRED_SPEC_FILE), - cred_file, - cred_file_lease_id); + int parseResult = + parse_cred_file_path( getenv( ENV_CF_CRED_SPEC_FILE ), cred_file, cred_file_lease_id ); - if (parseResult == EXIT_FAILURE) + if ( parseResult == EXIT_FAILURE ) { - std::cerr << "Failed parsing environment variable " << getenv(ENV_CF_CRED_SPEC_FILE) << std::endl; - cf_daemon.cf_logger.logger( LOG_ERR, "Failed parsing environment variable %s", getenv(ENV_CF_CRED_SPEC_FILE) ); + std::cerr << "Failed parsing environment variable " << getenv( ENV_CF_CRED_SPEC_FILE ) + << std::endl; + std::string log_message = "Failed parsing environment variable " + + std::string( getenv( ENV_CF_CRED_SPEC_FILE ) ); + cf_daemon.cf_logger.logger( LOG_ERR, log_message.c_str() ); - exit( EXIT_FAILURE); + exit( EXIT_FAILURE ); } - if (!std::filesystem::exists( cred_file)) + if ( !std::filesystem::exists( cred_file ) ) { cred_file_lease_id.clear(); std::cerr << "Ignoring CF_CREF_FILE, file " << cred_file << " not found" << std::endl; @@ -217,26 +222,36 @@ int main( int argc, const char* argv[] ) */ cf_daemon.domain_name = CF_TEST_DOMAIN_NAME; cf_daemon.gmsa_account_name = CF_TEST_GMSA_ACCOUNT; + std::string log_message; std::cerr << "krb_files_dir = " << cf_daemon.krb_files_dir << std::endl; - //std::cerr << "cred_file = " << cf_daemon.cred_file << " (lease id: " << 
cred_file_lease_id - // << ")" << std::endl; + log_message = "krb_files_dir = " + cf_daemon.krb_files_dir; + cf_daemon.cf_logger.logger( LOG_ERR, log_message.c_str() ); + + std::cerr << "cred_file = " << cf_daemon.cred_file << " (lease id: " << cred_file_lease_id + << ")" << std::endl; + log_message = "cred_file = " + cf_daemon.cred_file + " (lease id: " + cred_file_lease_id + ")"; + cf_daemon.cf_logger.logger( LOG_ERR, log_message.c_str() ); + std::cerr << "logging_dir = " << cf_daemon.logging_dir << std::endl; + log_message = "logging_dir = " + cf_daemon.logging_dir; + cf_daemon.cf_logger.logger( LOG_ERR, log_message.c_str() ); + std::cerr << "unix_socket_dir = " << cf_daemon.unix_socket_dir << std::endl; + log_message = "unix_socket_dir = " + cf_daemon.unix_socket_dir; + cf_daemon.cf_logger.logger( LOG_ERR, log_message.c_str() ); if ( cf_daemon.run_diagnostic ) { - exit( read_meta_data_json_test() || - read_meta_data_invalid_json_test() || renewal_failure_krb_dir_not_found_test() || - write_meta_data_json_test() ); + exit( read_meta_data_json_test() || read_meta_data_invalid_json_test() || + renewal_failure_krb_dir_not_found_test() || write_meta_data_json_test() ); } struct sigaction sa; cf_daemon.got_systemd_shutdown_signal = 0; memset( &sa, 0, sizeof( struct sigaction ) ); sa.sa_handler = &systemd_shutdown_signal_catcher; - if ( ( sigaction( SIGTERM, &sa, NULL ) == -1 ) || - ( sigaction( SIGINT, &sa, NULL ) == -1 ) || + if ( ( sigaction( SIGTERM, &sa, NULL ) == -1 ) || ( sigaction( SIGINT, &sa, NULL ) == -1 ) || ( sigaction( SIGHUP, &sa, NULL ) == -1 ) ) { perror( "sigaction" ); @@ -247,40 +262,64 @@ int main( int argc, const char* argv[] ) // 1. Systemd - daemon // 2. grpc server // 3. 
timer to run every 45 min + if ( !cf_daemon.cred_file.empty() ) + { + log_message = "Credential file exists " + cf_daemon.cred_file; + cf_daemon.cf_logger.logger( LOG_INFO, log_message.c_str() ); - if ( !cf_daemon.cred_file.empty() ) { - cf_daemon.cf_logger.logger( LOG_INFO, "Credential file exists %s", cf_daemon.cred_file.c_str() ); - - int specFileReturn = ProcessCredSpecFile(cf_daemon.krb_files_dir, cf_daemon.cred_file, cf_daemon.cf_logger, cred_file_lease_id); - if (specFileReturn == EXIT_FAILURE) { + int specFileReturn = ProcessCredSpecFile( cf_daemon.krb_files_dir, cf_daemon.cred_file, + cf_daemon.cf_logger, cred_file_lease_id ); + if ( specFileReturn == EXIT_FAILURE ) + { std::cerr << "ProcessCredSpecFile() non 0 " << std::endl; exit( EXIT_FAILURE ); } } - + /* Create one pthread for gRPC processing */ - pthread_status = - create_pthread( grpc_thread_start, grpc_thread_name, -1 ); + pthread_status = create_pthread( grpc_thread_start, grpc_thread_name, -1 ); if ( pthread_status.first < 0 ) { - cf_daemon.cf_logger.logger( LOG_ERR, "Error %d: Cannot create pthreads", - pthread_status.first ); + log_message = + "Error " + std::to_string( pthread_status.first ) + ": Cannot create pthreads"; + cf_daemon.cf_logger.logger( LOG_ERR, log_message.c_str() ); exit( EXIT_FAILURE ); } grpc_pthread = pthread_status.second; - cf_daemon.cf_logger.logger( LOG_INFO, "grpc pthread is at %p", grpc_pthread ); + if ( grpc_pthread == nullptr ) + { + log_message = "Warning: grpc_pthread is null"; + } + else + { + std::ostringstream address_stream; + address_stream << grpc_pthread; + log_message = "grpc pthread is at " + address_stream.str(); + } + cf_daemon.cf_logger.logger( LOG_INFO, log_message.c_str() ); /* Create pthread for refreshing krb tickets */ pthread_status = create_pthread( refresh_krb_tickets_thread_start, "krb_ticket_refresh_thread", -1 ); if ( pthread_status.first < 0 ) { - cf_daemon.cf_logger.logger( LOG_ERR, "Error %d: Cannot create pthreads", - pthread_status.first 
); + log_message = + "Error " + std::to_string( pthread_status.first ) + ": Cannot create pthreads"; + cf_daemon.cf_logger.logger( LOG_ERR, log_message.c_str() ); exit( EXIT_FAILURE ); } krb_refresh_pthread = pthread_status.second; - cf_daemon.cf_logger.logger( LOG_INFO, "krb refresh pthread is at %p", krb_refresh_pthread ); + if ( krb_refresh_pthread == nullptr ) + { + log_message = "Warning: krb_refresh_pthread is null"; + } + else + { + std::ostringstream address_stream; + address_stream << krb_refresh_pthread; + log_message = "krb refresh pthread is at " + address_stream.str(); + } + cf_daemon.cf_logger.logger( LOG_INFO, log_message.c_str() ); cf_daemon.cf_logger.set_log_level( LOG_NOTICE ); @@ -328,17 +367,16 @@ int main( int argc, const char* argv[] ) i ); // TBD: Remove later, visible in systemctl status ++i; #ifdef EXIT_USING_FILE - struct stat st; - if ( lstat( "/tmp/credentials_fetcher_exit.txt", &st ) != -1 ) - { - if (S_ISREG(st.st_mode)) - { - cf_daemon.got_systemd_shutdown_signal = 1; - } - } + struct stat st; + if ( lstat( "/tmp/credentials_fetcher_exit.txt", &st ) != -1 ) + { + if ( S_ISREG( st.st_mode ) ) + { + cf_daemon.got_systemd_shutdown_signal = 1; + } + } #endif } return EXIT_SUCCESS; } - diff --git a/package/credentials-fetcher.spec b/package/credentials-fetcher.spec index 8abf7ddb..81b4ee19 100644 --- a/package/credentials-fetcher.spec +++ b/package/credentials-fetcher.spec @@ -1,6 +1,6 @@ %global major_version 1 %global minor_version 3 -%global patch_version 6 +%global patch_version 65 # For handling bump release by rpmdev-bumpspec and mass rebuild %global baserelease 0 @@ -12,7 +12,7 @@ Summary: credentials-fetcher is a daemon that refreshes tickets or tokens License: Apache-2.0 URL: https://github.com/aws/credentials-fetcher -Source0: credentials-fetcher-v.1.3.6.tar.gz +Source0: credentials-fetcher-v.1.3.65.tar.gz BuildRequires: cmake3 make chrpath openldap-clients grpc-devel gcc-c++ glib2-devel jsoncpp-devel BuildRequires: openssl-devel 
zlib-devel protobuf-devel re2-devel krb5-devel systemd-devel @@ -22,14 +22,7 @@ BuildRequires: systemd-rpm-macros grpc-plugins BuildRequires: aws-sdk-cpp-devel aws-sdk-cpp aws-sdk-cpp-static %endif -# fedora41 does not support .NET6 -%if 0%{?fedora} >= 41 BuildRequires: dotnet-sdk-8.0 -Requires: dotnet-runtime-8.0 -%else -BuildRequires: dotnet-sdk-6.0 -Requires: dotnet-runtime-6.0 -%endif Requires: bind-utils openldap openldap-clients awscli jsoncpp # No one likes you i686 @@ -71,8 +64,7 @@ ctest3 %license LICENSE # https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/ %doc CONTRIBUTING.md NOTICE README.md -%attr(0700, -, -) %{_sbindir}/credentials_fetcher_utf16_private.exe -%attr(0700, -, -) %{_sbindir}/credentials_fetcher_utf16_private.runtimeconfig.json +%attr(0700, -, -) %{_sbindir}/credentials_fetcher_utf16_private %attr(0755, -, -) %{_sbindir}/krb5.conf %changelog diff --git a/renewal/src/renewal.cpp b/renewal/src/renewal.cpp index 76695b96..0884bff5 100644 --- a/renewal/src/renewal.cpp +++ b/renewal/src/renewal.cpp @@ -47,6 +47,7 @@ int krb_ticket_renew_handler( Daemon cf_daemon ) { std::list krb_ticket_info_list = read_meta_data_json( file_path ); + std::string log_message; // refresh the kerberos tickets for the service accounts, if tickets ready for // renewal @@ -69,9 +70,9 @@ int krb_ticket_renew_handler( Daemon cf_daemon ) if ( gmsa_ticket_result.first != 0 ) { std::pair status; - cf_logger.logger( - LOG_ERR, "ERROR: Cannot get gMSA krb ticket using account %s", - krb_ticket->service_account_name.c_str() ); + log_message = "ERROR: Cannot get gMSA krb ticket using account " + + krb_ticket->service_account_name; + cf_logger.logger( LOG_ERR, log_message.c_str() ); if ( domainless_user.find( "awsdomainlessusersecret" ) != std::string::npos ) { @@ -87,9 +88,9 @@ int krb_ticket_renew_handler( Daemon cf_daemon ) } if ( status.first < 0 ) { - cf_logger.logger( LOG_ERR, - "Error %d: Cannot get machine krb ticket", - status ); + 
log_message = "Error " + std::to_string( status.first ) + + ": Cannot get machine krb ticket"; + cf_logger.logger( LOG_ERR, log_message.c_str() ); } else { @@ -100,7 +101,8 @@ int krb_ticket_renew_handler( Daemon cf_daemon ) } else { - cf_logger.logger( LOG_INFO, "gMSA ticket is at %s", krb_cc_name.c_str() ); + log_message = "gMSA ticket is at " + krb_cc_name; + cf_logger.logger( LOG_INFO, log_message.c_str() ); } } } diff --git a/rpm/credentials-fetcher-1.3.61-0.amzn2023.x86_64.rpm b/rpm/credentials-fetcher-1.3.61-0.amzn2023.x86_64.rpm new file mode 100644 index 00000000..681fdeb7 Binary files /dev/null and b/rpm/credentials-fetcher-1.3.61-0.amzn2023.x86_64.rpm differ diff --git a/sample/java-sample-app/README.md b/sample/java-sample-app/README.md new file mode 100644 index 00000000..6f060fa5 --- /dev/null +++ b/sample/java-sample-app/README.md @@ -0,0 +1,53 @@ +# SQL Server Kerberos Authentication Java Application + +A simple Java application that connects to Microsoft SQL Server using Kerberos authentication and executes a simple Select command on the database. + +## Prerequisites + +- Java 11 or higher +- Microsoft JDBC Driver for SQL Server (JDBC driver used to create this app is ```mssql-jdbc-12.8.1.jre8.jar```) +- Valid Kerberos ticket (can be verified using `klist`) +- SQL Server configured for Kerberos authentication + +## Files Required + +1. `SQLServerKerberosConnection.class` - The compiled Java class file +2. `mssql-jdbc-12.8.1.jre8.jar` - Microsoft JDBC driver for SQL Server + +## Environment Setup + +1. Ensure you have a valid Kerberos ticket: +``` +klist +``` + +2. set the ```KRB5CCNAME``` in the environment to the krb5cc directory of a ticket. For example: +``` +export KRB5CCNAME=/var/credentials-fetcher/krbdir//WebApp01/krb5cc +``` +Verify that the variable has been set correctly using ```echo $KRB5CCNAME``` + +3. 
Compile and run the Java file like so +``` +javac --release 11 -cp .:mssql-jdbc-12.8.1.jre8.jar SQLServerKerberosConnection.java +java -cp .:mssql-jdbc-12.8.1.jre8.jar SQLServerKerberosConnection +``` + +4. You should see the following output +``` +Connected successfully using Kerberos authentication. ++---------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ +| EmpID | EmpName | Designation | Department | JoiningDate | ++---------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ +| 1 | CHIN YEN | LAB ASSISTANT | LAB | 2022-03-05 03:57:09.967 | ++---------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ +| 2 | MIKE PEARL | SENIOR ACCOUNTANT | ACCOUNTS | 2022-03-05 03:57:09.967 | ++---------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ +| 3 | GREEN FIELD | ACCOUNTANT | ACCOUNTS | 2022-03-05 03:57:09.967 | ++---------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ +| 4 | DEWANE PAUL | PROGRAMMER | IT | 2022-03-05 03:57:09.967 | ++---------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ +| 5 | MATTS | SR. 
PROGRAMMER | IT | 2022-03-05 03:57:09.967 | ++---------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ +| 6 | PLANK OTO | ACCOUNTANT | ACCOUNTS | 2022-03-05 03:57:09.967 | +``` \ No newline at end of file diff --git a/sample/java-sample-app/SQLServerKerberosConnection.java b/sample/java-sample-app/SQLServerKerberosConnection.java new file mode 100644 index 00000000..1bae570f --- /dev/null +++ b/sample/java-sample-app/SQLServerKerberosConnection.java @@ -0,0 +1,64 @@ +import java.sql.*; + +// Replace user, domain name, and server name before compiling + +public class SQLServerKerberosConnection { + public static void main(String[] args) { + + System.setProperty("java.security.krb5.principal", "@"); + + String connectionUrl = "jdbc:sqlserver://:1433;" + + "databaseName=EmployeesDB;" + + "integratedSecurity=true;" + + "authenticationScheme=JavaKerberos;" + + "userName=@;" + + "serverSpn=MSSQLSvc/:1433;" + + "trustServerCertificate=true"; + + try { + // Ensure the JDBC driver is loaded + Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver"); + + // Establish the connection + try (Connection connection = DriverManager.getConnection(connectionUrl)) { + System.out.println("Connected successfully using Kerberos authentication."); + + // Perform a simple query + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("SELECT * from EmployeesDB.dbo.EmployeesTable")) { + + ResultSetMetaData metaData = resultSet.getMetaData(); + int columnCount = metaData.getColumnCount(); + + String[] columns = new String[columnCount]; + for (int i = 0; i < columnCount; i++) { + columns[i] = metaData.getColumnName(i + 1); + } + printRow(columns); + + // Display data rows + while (resultSet.next()) { + String[] row = new String[columnCount]; + for (int i = 0; i < columnCount; i++) { + row[i] = resultSet.getString(i + 1); + } + printRow(row); + } + } + } + 
} catch (ClassNotFoundException e) { + System.err.println("Error loading JDBC driver: " + e.getMessage()); + } catch (SQLException e) { + System.err.println("Error connecting to the database: " + e.getMessage()); + } + } + + + private static void printRow(String[] row) { + System.out.println("+---------------------------".repeat(row.length) + "+"); + for (String col : row) { + System.out.printf("| %-25s ", col != null ? col : "NULL"); + } + System.out.println("|"); + } +} diff --git a/setup-scripts/README.md b/setup-scripts/README.md new file mode 100644 index 00000000..c28122a0 --- /dev/null +++ b/setup-scripts/README.md @@ -0,0 +1,5 @@ +## Setup Scripts +Setup scripts are meant to compile and build credentials-fetcher along with +all its dependencies. These can be used to setup a development environment +to build and test changes. You can choose to either setup a docker container +or setup the dependencies on the instance itself. \ No newline at end of file diff --git a/docker/Dockerfile-ubuntu-20.04 b/setup-scripts/docker-scripts/Dockerfile-ubuntu-20.04 similarity index 96% rename from docker/Dockerfile-ubuntu-20.04 rename to setup-scripts/docker-scripts/Dockerfile-ubuntu-20.04 index 646d0ba8..6010426f 100644 --- a/docker/Dockerfile-ubuntu-20.04 +++ b/setup-scripts/docker-scripts/Dockerfile-ubuntu-20.04 @@ -7,7 +7,7 @@ RUN apt-get update \ apt install -y git clang wget curl autoconf \ libglib2.0-dev libboost-dev libkrb5-dev libsystemd-dev libssl-dev \ libboost-program-options-dev libboost-filesystem-dev byacc make libjsoncpp-dev \ - clang-12 + clang-12 libgtest-dev RUN ln -sf /usr/lib/llvm-12/bin/clang /usr/bin/clang \ && ln -sf /usr/lib/llvm-12/bin/clang++ /usr/bin/clang++ @@ -30,7 +30,7 @@ RUN cd /root && wget https://packages.microsoft.com/config/ubuntu/20.04/packages && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update \ - && apt-get install -y dotnet-sdk-6.0 \ + && apt-get install -y dotnet-sdk-8.0 \ && ln -s 
'/usr/share/dotnet' '/usr/lib/dotnet' RUN git clone https://github.com/aws/credentials-fetcher /root/credentials-fetcher \ diff --git a/docker/Dockerfile-ubuntu-22.04 b/setup-scripts/docker-scripts/Dockerfile-ubuntu-22.04 similarity index 96% rename from docker/Dockerfile-ubuntu-22.04 rename to setup-scripts/docker-scripts/Dockerfile-ubuntu-22.04 index f10715e3..e90d0335 100644 --- a/docker/Dockerfile-ubuntu-22.04 +++ b/setup-scripts/docker-scripts/Dockerfile-ubuntu-22.04 @@ -6,7 +6,8 @@ RUN apt-get update \ && DEBIAN_FRONTEND="noninteractive" TZ="${TIME_ZONE}" \ apt install -y git clang wget curl autoconf \ libglib2.0-dev libboost-dev libkrb5-dev libsystemd-dev libssl-dev \ - libboost-program-options-dev libboost-filesystem-dev byacc make libjsoncpp-dev + libboost-program-options-dev libboost-filesystem-dev byacc make libjsoncpp-dev \ + libgtest-dev RUN git clone https://github.com/Kitware/CMake.git -b release /root/CMake \ && cd /root/CMake && ./configure && make -j4 && pwd && make install @@ -28,7 +29,7 @@ RUN cd /root && wget https://packages.microsoft.com/config/ubuntu/20.04/packages && apt remove 'dotnet*' 'aspnetcore*' 'netstandard*' \ && rm /etc/apt/sources.list.d/microsoft-prod.list \ && apt update \ - && apt-get install -y dotnet-sdk-6.0 + && apt-get install -y dotnet-sdk-8.0 #RUN git clone -b credentials-fetcher-credfile https://github.com/fordth/credentials-fetcher /root/credentials-fetcher \ RUN git clone https://github.com/aws/credentials-fetcher /root/credentials-fetcher \ diff --git a/setup-scripts/shell-scripts/ubuntu-22.04-setup.sh b/setup-scripts/shell-scripts/ubuntu-22.04-setup.sh new file mode 100644 index 00000000..78716fb9 --- /dev/null +++ b/setup-scripts/shell-scripts/ubuntu-22.04-setup.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +# Run as root +if [ "$EUID" -ne 0 ] + then echo "Please run as root" + exit +fi + +# Set timezone +TIME_ZONE="UTC" + + +USER_DIR="/home/ubuntu" # Default directory + +echo "Do you want to use a different directory instead 
of /home/ubuntu? (y/n)" +read response + +if [[ $response =~ ^[Yy]$ ]]; then + echo "Please enter the directory path:" + read user_input + + if [ -d "$user_input" ]; then + USER_DIR="$user_input" + else + echo "Warning: The directory $user_input does not exist. Using default: $USER_DIR" + fi +fi + +cd "$USER_DIR" + +echo "Installing dependencies for credentials-fetcher" +apt-get update \ + && DEBIAN_FRONTEND="noninteractive" TZ="${TIME_ZONE}" \ + apt install -y git clang wget curl autoconf \ + libglib2.0-dev libboost-dev libkrb5-dev libsystemd-dev libssl-dev \ + libboost-program-options-dev libboost-filesystem-dev byacc make \ + libjsoncpp-dev libgtest-dev pip python3.10-venv \ + libsasl2-modules-gssapi-mit:amd64 ldap-utils krb5-config awscli + + +git clone https://github.com/Kitware/CMake.git -b release \ + && cd CMake && ./configure && make -j4 && pwd && make install + +if [ $? -ne 0 ]; then + echo "error: Cmake installation failed" + exit 1 +else + echo "CMake successfully installed, now installing krb5" +fi + +cd "$USER_DIR" + + +git clone https://github.com/krb5/krb5.git -b krb5-1.21.2-final \ + && cd krb5/src && autoconf && autoreconf && ./configure && make -j4 && make install + +if [ $? -ne 0 ]; then + echo "error: krb5 installation failed" + exit 1 +else + echo "krb5 successfully installed, now installing grpc" +fi + +cd "$USER_DIR" + +git clone --recurse-submodules -b v1.58.0 https://github.com/grpc/grpc && mkdir -p grpc/build && cd grpc/build && cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_CXX_STANDARD=17 ../ && make -j4 && make install + +cd "$USER_DIR" + +mkdir -p grpc/cmake/build && cd grpc/cmake/build \ + && cmake -DgRPC_BUILD_TESTS=ON ../.. && make grpc_cli \ + && cp grpc_cli /usr/local/bin + +if [ $? 
-ne 0 ]; then + echo "error: grpc installation failed" + exit 1 +else + echo "grpc successfully installed, now installing Microsoft packages" +fi + +cd "$USER_DIR" + +wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \ + && DEBIAN_FRONTEND=noninteractive dpkg -i packages-microsoft-prod.deb \ + && rm packages-microsoft-prod.deb \ + && apt-get remove -y 'dotnet*' 'aspnetcore*' 'netstandard*' \ + && rm /etc/apt/sources.list.d/microsoft-prod.list \ + && apt-get update -y \ + && apt-get install -y dotnet-sdk-8.0 + +mkdir -p /usr/lib64/glib-2.0/ && ln -s '/usr/lib/x86_64-linux-gnu/glib-2.0/include/' '/usr/lib64/glib-2.0/include' && ln -s '/usr/include/jsoncpp/json/' '/usr/include/json' + +mkdir -p /var/credentials-fetcher/logging +mkdir -p /var/credentials-fetcher/socket +mkdir -p /var/credentials-fetcher/krbdir + +if [ $? -ne 0 ]; then + echo "error: Microsoft packages installation failed" + exit 1 +else + echo "Microsoft packages successfully installed. Please follow the instructions in the setup doc to clone the repo and build it" +fi + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib + +cd "$USER_DIR" +git clone -b dev https://github.com/aws/credentials-fetcher.git # update branch as needed +mkdir -p credentials-fetcher/build +cd credentials-fetcher/build +cmake ../ && make -j4 && make install + diff --git a/test/tester.cpp b/test/tester.cpp new file mode 100644 index 00000000..78ccb716 --- /dev/null +++ b/test/tester.cpp @@ -0,0 +1,7 @@ +#include <gtest/gtest.h> + +#include "daemon.h" + +TEST(DaemonTest, InvalidCharacterTest) { + ASSERT_EQ(contains_invalid_characters("abcdef"), 0); +}