From 8e1559b776c430deb738a4991f470725de0aa413 Mon Sep 17 00:00:00 2001 From: Yogiraj Awati Date: Wed, 7 Mar 2018 21:55:01 -0800 Subject: [PATCH] Release commit v2.1.0 --- .github/PULL_REQUEST_TEMPLATE.md | 6 + .gitignore | 21 + CHANGELOG.md | 7 + CODE_OF_CONDUCT.md | 4 + CONTRIBUTING.md | 61 ++ LICENSE | 202 ++++++ NOTICE | 2 + README.md | 78 +++ Tool/src/packaging/debian/build_deb_linux.sh | 71 ++ Tool/src/packaging/debian/changelog | 6 + Tool/src/packaging/debian/changelog.Debian | 4 + Tool/src/packaging/debian/conffiles | 3 + Tool/src/packaging/debian/control | 9 + Tool/src/packaging/debian/debian-binary | 1 + Tool/src/packaging/debian/lintian-overrides | 7 + Tool/src/packaging/debian/postinst | 10 + Tool/src/packaging/debian/preinst | 13 + Tool/src/packaging/debian/prerm | 10 + Tool/src/packaging/debian/xray.conf | 7 + Tool/src/packaging/debian/xray.service | 17 + Tool/src/packaging/linux/README.md | 1 + Tool/src/packaging/linux/build_rpm_linux.sh | 27 + Tool/src/packaging/linux/build_zip_linux.sh | 13 + Tool/src/packaging/linux/xray.conf | 12 + Tool/src/packaging/linux/xray.service | 16 + Tool/src/packaging/linux/xray.spec | 64 ++ Tool/src/packaging/osx/build_zip_osx.sh | 13 + Tool/src/packaging/windows/build_zip_win.sh | 22 + Tool/src/versiongenerator/version-gen.go | 20 + VERSION | 1 + daemon/bufferpool/bufferpool.go | 111 ++++ daemon/bufferpool/bufferpool_test.go | 158 +++++ daemon/cfg.yaml | 29 + daemon/cfg/cfg.go | 359 ++++++++++ daemon/cfg/cfg_test.go | 625 ++++++++++++++++++ daemon/cli/cli.go | 93 +++ daemon/cli/cli_test.go | 223 +++++++ daemon/conn/conn.go | 127 ++++ daemon/conn/conn_test.go | 127 ++++ daemon/conn/xray_client.go | 82 +++ daemon/daemon.go | 336 ++++++++++ daemon/logger/log_config.go | 57 ++ daemon/logger/logger_test.go | 147 ++++ daemon/processor/batchprocessor.go | 143 ++++ daemon/processor/batchprocessor_test.go | 388 +++++++++++ daemon/processor/processor.go | 192 ++++++ daemon/processor/processor_test.go | 293 ++++++++ daemon/profiler/profiler.go | 45 ++ daemon/ringbuffer/ringbuffer.go | 81 +++ daemon/ringbuffer/ringbuffer_test.go | 139 ++++ daemon/socketconn/socketconn.go | 19 + daemon/socketconn/udp/udp.go | 57 ++ daemon/telemetry/telemetry.go | 338 ++++++++++ daemon/telemetry/telemetry_test.go | 156 +++++ daemon/tracesegment/tracesegment.go | 45 ++ daemon/tracesegment/tracesegment_test.go | 102 +++ daemon/tracesegment/tracesegment_test_util.go | 34 + daemon/tracing.go | 37 ++ daemon/tracing_windows.go | 64 ++ daemon/util/test/log_writer.go | 27 + daemon/util/test/mock_timer_client.go | 107 +++ daemon/util/test/mock_timer_client_test.go | 250 +++++++ daemon/util/timer/timer.go | 22 + daemon/util/util.go | 49 ++ daemon/util/util_test.go | 83 +++ glide.lock | 69 ++ glide.yaml | 12 + makefile | 93 +++ 68 files changed, 6047 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 NOTICE create mode 100644 README.md create mode 100755 Tool/src/packaging/debian/build_deb_linux.sh create mode 100644 Tool/src/packaging/debian/changelog create mode 100644 Tool/src/packaging/debian/changelog.Debian create mode 100644 Tool/src/packaging/debian/conffiles create mode 100644 Tool/src/packaging/debian/control create mode 100644 Tool/src/packaging/debian/debian-binary create mode 100644 Tool/src/packaging/debian/lintian-overrides create mode 100644 Tool/src/packaging/debian/postinst 
create mode 100644 Tool/src/packaging/debian/preinst create mode 100644 Tool/src/packaging/debian/prerm create mode 100644 Tool/src/packaging/debian/xray.conf create mode 100644 Tool/src/packaging/debian/xray.service create mode 100644 Tool/src/packaging/linux/README.md create mode 100755 Tool/src/packaging/linux/build_rpm_linux.sh create mode 100755 Tool/src/packaging/linux/build_zip_linux.sh create mode 100644 Tool/src/packaging/linux/xray.conf create mode 100644 Tool/src/packaging/linux/xray.service create mode 100644 Tool/src/packaging/linux/xray.spec create mode 100755 Tool/src/packaging/osx/build_zip_osx.sh create mode 100755 Tool/src/packaging/windows/build_zip_win.sh create mode 100644 Tool/src/versiongenerator/version-gen.go create mode 100644 VERSION create mode 100644 daemon/bufferpool/bufferpool.go create mode 100644 daemon/bufferpool/bufferpool_test.go create mode 100644 daemon/cfg.yaml create mode 100644 daemon/cfg/cfg.go create mode 100644 daemon/cfg/cfg_test.go create mode 100644 daemon/cli/cli.go create mode 100644 daemon/cli/cli_test.go create mode 100644 daemon/conn/conn.go create mode 100644 daemon/conn/conn_test.go create mode 100644 daemon/conn/xray_client.go create mode 100644 daemon/daemon.go create mode 100644 daemon/logger/log_config.go create mode 100644 daemon/logger/logger_test.go create mode 100644 daemon/processor/batchprocessor.go create mode 100644 daemon/processor/batchprocessor_test.go create mode 100644 daemon/processor/processor.go create mode 100644 daemon/processor/processor_test.go create mode 100644 daemon/profiler/profiler.go create mode 100644 daemon/ringbuffer/ringbuffer.go create mode 100644 daemon/ringbuffer/ringbuffer_test.go create mode 100644 daemon/socketconn/socketconn.go create mode 100644 daemon/socketconn/udp/udp.go create mode 100644 daemon/telemetry/telemetry.go create mode 100644 daemon/telemetry/telemetry_test.go create mode 100644 daemon/tracesegment/tracesegment.go create mode 100644 daemon/tracesegment/tracesegment_test.go create mode 100644 daemon/tracesegment/tracesegment_test_util.go create mode 100644 daemon/tracing.go create mode 100644 daemon/tracing_windows.go create mode 100644 daemon/util/test/log_writer.go create mode 100644 daemon/util/test/mock_timer_client.go create mode 100644 daemon/util/test/mock_timer_client_test.go create mode 100644 daemon/util/timer/timer.go create mode 100644 daemon/util/util.go create mode 100644 daemon/util/util_test.go create mode 100644 glide.lock create mode 100644 glide.yaml create mode 100644 makefile diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..ab40d21 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,6 @@ +*Issue #, if available:* + +*Description of changes:* + + +By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2c85393 --- /dev/null +++ b/.gitignore @@ -0,0 +1,21 @@ +bin +pkg +test_coverage +build +.idea +.DS_Store + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +vendor diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..cc7432b --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,7 @@ +# Change Log +All notable changes to this project will be documented in this file. 
+
+## 2.1.0 (2018-03-08)
+- Open sourced the X-Ray daemon project
+- Telemetry data is no longer uploaded when no traces are recorded
+- The daemon logs an error to stderr if it fails to read the provided configuration file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..3b64466
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..482e9b9
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/aws/aws-xray-daemon/issues), or [recently closed](https://github.com/aws/aws-xray-daemon/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *master* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/aws/aws-xray-daemon/labels/help%20wanted) issues is a great place to start.
+ + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](https://github.com/aws/aws-xray-daemon/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + +We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..f31a5e3 --- /dev/null +++ b/NOTICE @@ -0,0 +1,2 @@ +AWS X-Ray Daemon +Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/README.md b/README.md new file mode 100644 index 0000000..267903c --- /dev/null +++ b/README.md @@ -0,0 +1,78 @@ +# AWS X-Ray Daemon + +The AWS X-Ray daemon is a software application that listens for traffic on UDP port 2000, gathers raw segment data, and relays it to the AWS X-Ray API. +The daemon works in conjunction with the AWS X-Ray SDKs and must be running so that data sent by the SDKs can reach the X-Ray service. For more information, + see [AWS X-Ray Daemon](https://docs.aws.amazon.com/xray/latest/devguide/xray-daemon.html). + +## Getting Help + +Use the following community resources for getting help with the AWS X-Ray Daemon. We use the GitHub issues for tracking bugs and feature requests. 
+
+* Ask a question in the [AWS X-Ray Forum](https://forums.aws.amazon.com/forum.jspa?forumID=241&start=0).
+* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html).
+* If you think you may have found a bug, open an [issue](https://github.com/aws/aws-xray-daemon/issues/new).
+* For contributing guidelines, refer to [CONTRIBUTING.md](https://github.com/aws/aws-xray-daemon/blob/master/CONTRIBUTING.md).
+
+## Sending Segment Documents
+
+The X-Ray SDK sends segment documents to the daemon to avoid making calls to AWS directly. You can send the segment/subsegment in JSON over UDP port 2000
+to the X-Ray daemon, prepended with the daemon header: `{"format": "json", "version": 1}\n`
+
+```
+{"format": "json", "version": 1}\n{}
+```
+For more details, refer to the [AWS X-Ray documentation](https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html).
+
+## Installing
+
+The AWS X-Ray Daemon is compatible with Go 1.7 and later.
+
+Install the daemon using the following command:
+
+```
+go get -u github.com/aws/aws-xray-daemon/...
+```
+
+You can also use [Glide](https://github.com/Masterminds/glide) to manage dependencies by using:
+
+```
+glide install
+```
+## Daemon Usage (command line args)
+
+Usage: X-Ray [options]
+1. -a --resource-arn Amazon Resource Name (ARN) of the AWS resource running the daemon.
+2. -o --local-mode Don't check for EC2 instance metadata.
+3. -m --buffer-memory Change the amount of memory in MB that buffers can use (minimum 3).
+4. -n --region Send segments to the X-Ray service in a specific region.
+5. -b --bind Override the default UDP address (127.0.0.1:2000).
+6. -r --role-arn Assume the specified IAM role to upload segments to a different account.
+7. -c --config Load a configuration file from the specified path.
+8. -f --log-file Output logs to the specified file path.
+9. -l --log-level Log level, from most verbose to least: dev, debug, info, warn, error, prod (default).
+10. -v --version Show AWS X-Ray daemon version.
+11. -h --help Show this screen.
+
+## Build
+
+`make build` builds binaries and .zip files in the `/build` folder for the Linux, macOS, and Windows platforms.
+
+### Linux
+
+`make build-linux` builds binaries and .zip files in the `/build` folder for the Linux platform.
+
+### macOS
+
+`make build-mac` builds binaries and .zip files in the `/build` folder for the macOS platform.
+
+### Windows
+
+`make build-windows` builds binaries and .zip files in the `/build` folder for the Windows platform.
+
+## Testing
+
+`make test` runs unit tests for the X-Ray daemon.
+
+## License
+
+This library is licensed under the Apache 2.0 License.
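The sketch below is an editor's illustration (not part of this patch) of how an application could hand a segment document to a locally running daemon over UDP, using the header format described in the README above. The service name, trace ID, and segment ID are placeholder values only, not output of any SDK.

```go
package main

import (
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// The daemon's default UDP address; adjust if -b/--bind was used.
	conn, err := net.Dial("udp", "127.0.0.1:2000")
	if err != nil {
		log.Fatalf("cannot reach the X-Ray daemon: %v", err)
	}
	defer conn.Close()

	// Minimal in-progress segment document; the IDs here are placeholders and
	// must be generated per the X-Ray trace/segment ID format in real use.
	segment := fmt.Sprintf(
		`{"name":"example-service","id":"70de5b6f19ff9a0a","trace_id":"1-581cf771-a006649127e371903a2de979","start_time":%f,"in_progress":true}`,
		float64(time.Now().UnixNano())/1e9,
	)

	// Header and segment body are sent together in a single UDP datagram.
	header := `{"format": "json", "version": 1}` + "\n"
	if _, err := conn.Write([]byte(header + segment)); err != nil {
		log.Fatalf("failed to send segment: %v", err)
	}
}
```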
diff --git a/Tool/src/packaging/debian/build_deb_linux.sh b/Tool/src/packaging/debian/build_deb_linux.sh new file mode 100755 index 0000000..c94c716 --- /dev/null +++ b/Tool/src/packaging/debian/build_deb_linux.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +echo "****************************************" +echo "Creating deb file for Ubuntu Linux amd64" +echo "****************************************" + +echo "Creating debian folders" + +mkdir -p ${BGO_SPACE}/bin/debian_amd64/debian/usr/bin/ +mkdir -p ${BGO_SPACE}/bin/debian_amd64/debian/etc/init/ +mkdir -p ${BGO_SPACE}/bin/debian_amd64/debian/etc/amazon/xray/ +mkdir -p ${BGO_SPACE}/bin/debian_amd64/debian/var/lib/amazon/xray/ +mkdir -p ${BGO_SPACE}/bin/debian_amd64/debian/lib/systemd/system/ +mkdir -p ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/lintian/overrides/ +mkdir -p ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/ + +echo "Copying application files" + +cp ${BGO_SPACE}/build/xray/xray ${BGO_SPACE}/bin/debian_amd64/debian/usr/bin/ +cp ${BGO_SPACE}/build/xray/cfg.yaml ${BGO_SPACE}/bin/debian_amd64/debian/etc/amazon/xray/cfg.yaml +cp ${BGO_SPACE}/Tool/src/packaging/debian/xray.conf ${BGO_SPACE}/bin/debian_amd64/debian/etc/init/xray.conf +cp ${BGO_SPACE}/Tool/src/packaging/debian/xray.service ${BGO_SPACE}/bin/debian_amd64/debian/lib/systemd/system/xray.service + +echo "Copying debian package config files" + +cp ${BGO_SPACE}/Tool/src/packaging/debian/conffiles ${BGO_SPACE}/bin/debian_amd64/debian/ +cp ${BGO_SPACE}/Tool/src/packaging/debian/preinst ${BGO_SPACE}/bin/debian_amd64/debian/ +cp ${BGO_SPACE}/Tool/src/packaging/debian/postinst ${BGO_SPACE}/bin/debian_amd64/debian/ +cp ${BGO_SPACE}/Tool/src/packaging/debian/prerm ${BGO_SPACE}/bin/debian_amd64/debian/ +cp ${BGO_SPACE}/Tool/src/packaging/debian/lintian-overrides ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/lintian/overrides/xray + +echo "Constructing the control file" + +echo 'Package: xray' > ${BGO_SPACE}/bin/debian_amd64/debian/control +echo 'Architecture: amd64' >> ${BGO_SPACE}/bin/debian_amd64/debian/control +echo -n 'Version: ' >> ${BGO_SPACE}/bin/debian_amd64/debian/control +cat ${BGO_SPACE}/VERSION | tr -d "\n" >> ${BGO_SPACE}/bin/debian_amd64/debian/control +echo '-1' >> ${BGO_SPACE}/bin/debian_amd64/debian/control +cat ${BGO_SPACE}/Tool/src/packaging/debian/control >> ${BGO_SPACE}/bin/debian_amd64/debian/control + +echo "Constructing the changelog file" + +echo -n 'xray (' > ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/changelog +cat VERSION | tr -d "\n" >> ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/changelog +echo '-1) precise-proposed; urgency=low' >> ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/changelog +cat ${BGO_SPACE}/Tool/src/packaging/debian/changelog >> ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/changelog + +cp ${BGO_SPACE}/Tool/src/packaging/debian/changelog.Debian ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/ +cp ${BGO_SPACE}/Tool/src/packaging/debian/debian-binary ${BGO_SPACE}/bin/debian_amd64/debian/ + +echo "Setting permissioning as required by debian" + +cd ${BGO_SPACE}/bin/debian_amd64/; find ./debian -type d | xargs chmod 755; cd ~- + +echo "Compressing changelog" + +cd ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/; export GZIP=-9; tar cvzf changelog.gz changelog --owner=0 --group=0 ; cd ~- +cd ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/; export GZIP=-9; tar cvzf changelog.Debian.gz changelog.Debian --owner=0 --group=0; cd ~- + +rm 
${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/changelog +rm ${BGO_SPACE}/bin/debian_amd64/debian/usr/share/doc/xray/changelog.Debian + +echo "Creating tar" +# the below permission is required by debian +cd ${BGO_SPACE}/bin/debian_amd64/debian/; tar czf data.tar.gz usr etc lib --owner=0 --group=0 ; cd ~- +cd ${BGO_SPACE}/bin/debian_amd64/debian/; tar czf control.tar.gz control conffiles preinst postinst prerm --owner=0 --group=0 ; cd ~- + +echo "Constructing the deb package" +ar r ${BGO_SPACE}/bin/xray.deb ${BGO_SPACE}/bin/debian_amd64/debian/debian-binary +ar r ${BGO_SPACE}/bin/xray.deb ${BGO_SPACE}/bin/debian_amd64/debian/control.tar.gz +ar r ${BGO_SPACE}/bin/xray.deb ${BGO_SPACE}/bin/debian_amd64/debian/data.tar.gz +cp ${BGO_SPACE}/bin/xray.deb ${BGO_SPACE}/build/xray/aws-xray-daemon-`cat ${BGO_SPACE}/VERSION`.deb diff --git a/Tool/src/packaging/debian/changelog b/Tool/src/packaging/debian/changelog new file mode 100644 index 0000000..93879f2 --- /dev/null +++ b/Tool/src/packaging/debian/changelog @@ -0,0 +1,6 @@ + + * Initial release + + -- Amazon.com, Inc. Fri, 05 Nov 2016 14:00:00 +1000 + + diff --git a/Tool/src/packaging/debian/changelog.Debian b/Tool/src/packaging/debian/changelog.Debian new file mode 100644 index 0000000..ea7ce25 --- /dev/null +++ b/Tool/src/packaging/debian/changelog.Debian @@ -0,0 +1,4 @@ +Old Changelog: +Amazon X-Ray Debian maintainer and upstream author are identical. +Therefore see also normal changelog file for Debian changes. + diff --git a/Tool/src/packaging/debian/conffiles b/Tool/src/packaging/debian/conffiles new file mode 100644 index 0000000..3389f20 --- /dev/null +++ b/Tool/src/packaging/debian/conffiles @@ -0,0 +1,3 @@ +/etc/init/xray.conf +/etc/amazon/xray/cfg.yaml +/lib/systemd/system/xray.service diff --git a/Tool/src/packaging/debian/control b/Tool/src/packaging/debian/control new file mode 100644 index 0000000..7707e75 --- /dev/null +++ b/Tool/src/packaging/debian/control @@ -0,0 +1,9 @@ +Section: admin +Depends: libc6 +Priority: optional +Copyright: Apache License, Version 2.0 +Suggests: aws-xray-doc +Maintainer: Amazon.com, Inc. +Description: Amazon X-Ray is a platform makes it easy for customers to analyze the behavior of distributed applications. + Please visit the AWS Developer Tools page for additional information. + diff --git a/Tool/src/packaging/debian/debian-binary b/Tool/src/packaging/debian/debian-binary new file mode 100644 index 0000000..cd5ac03 --- /dev/null +++ b/Tool/src/packaging/debian/debian-binary @@ -0,0 +1 @@ +2.0 diff --git a/Tool/src/packaging/debian/lintian-overrides b/Tool/src/packaging/debian/lintian-overrides new file mode 100644 index 0000000..de4539f --- /dev/null +++ b/Tool/src/packaging/debian/lintian-overrides @@ -0,0 +1,7 @@ +# The Go compiler is currently unable to produce read-only relocations +# (it produces static binaries). +hardening-no-relro /usr/bin/xray + +# Amazon Xray Debian maintainer and upstream author are identical. +# Therefore see normal changelog file for Debian changes. 
+syntax-error-in-debian-changelog diff --git a/Tool/src/packaging/debian/postinst b/Tool/src/packaging/debian/postinst new file mode 100644 index 0000000..b45c80d --- /dev/null +++ b/Tool/src/packaging/debian/postinst @@ -0,0 +1,10 @@ +echo "Starting xray daemon" +initSystem=$(cat /proc/1/comm) +if [ $initSystem = init ] +then + start xray || true +elif [ $initSystem = systemd ] +then + systemctl start xray + systemctl daemon-reload +fi \ No newline at end of file diff --git a/Tool/src/packaging/debian/preinst b/Tool/src/packaging/debian/preinst new file mode 100644 index 0000000..fe6d4c5 --- /dev/null +++ b/Tool/src/packaging/debian/preinst @@ -0,0 +1,13 @@ +echo "Preparing for install" +initSystem=$(cat /proc/1/comm) +useradd --system -s /bin/false xray +mkdir -m 755 /var/log/xray/ +chown xray /var/log/xray/ +if [ $initSystem = init ] +then + stop xray || true +elif [ $initSystem = systemd ] +then + systemctl stop xray + systemctl daemon-reload +fi diff --git a/Tool/src/packaging/debian/prerm b/Tool/src/packaging/debian/prerm new file mode 100644 index 0000000..0fe7b4b --- /dev/null +++ b/Tool/src/packaging/debian/prerm @@ -0,0 +1,10 @@ +echo "Stopping xray daemon" +initSystem=$(cat /proc/1/comm) +if [ $initSystem = init ] +then + stop xray || true +elif [ $initSystem = systemd ] +then + systemctl stop xray + systemctl daemon-reload +fi \ No newline at end of file diff --git a/Tool/src/packaging/debian/xray.conf b/Tool/src/packaging/debian/xray.conf new file mode 100644 index 0000000..ee326b1 --- /dev/null +++ b/Tool/src/packaging/debian/xray.conf @@ -0,0 +1,7 @@ +# Location /etc/init/xray.conf +# initctl start xray +start on runlevel [2345] +stop on runlevel [!2345] + +respawn +exec su -s /bin/bash -c "xray -f /var/log/xray/xray.log" xray diff --git a/Tool/src/packaging/debian/xray.service b/Tool/src/packaging/debian/xray.service new file mode 100644 index 0000000..dfbbc95 --- /dev/null +++ b/Tool/src/packaging/debian/xray.service @@ -0,0 +1,17 @@ +# Location: /etc/systemd/system/xray.service +# systemctl enable xray +# systemctl start xray +# systemctl | grep xray +# https://www.freedesktop.org/software/systemd/man/systemd.unit.html + +[Unit] +Description=xray + +[Service] +Type=simple +WorkingDirectory=/usr/bin/ +ExecStart=/bin/su -s /bin/bash -c "xray -f /var/log/xray/xray.log" xray +Restart=always + +[Install] +WantedBy=network-online.target diff --git a/Tool/src/packaging/linux/README.md b/Tool/src/packaging/linux/README.md new file mode 100644 index 0000000..752b039 --- /dev/null +++ b/Tool/src/packaging/linux/README.md @@ -0,0 +1 @@ +sudo yum install rpmdevtools diff --git a/Tool/src/packaging/linux/build_rpm_linux.sh b/Tool/src/packaging/linux/build_rpm_linux.sh new file mode 100755 index 0000000..677e893 --- /dev/null +++ b/Tool/src/packaging/linux/build_rpm_linux.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +echo "*************************************************" +echo "Creating rpm file for Amazon Linux and RHEL amd64" +echo "*************************************************" + +rm -rf ${BGO_SPACE}/bin/linux_amd64/linux + +echo "Creating rpmbuild workspace" +mkdir -p ${BGO_SPACE}/bin/linux_amd64/linux/rpmbuild/{RPMS,SRPMS,BUILD,COORD_SOURCES,SPECS,DATA_SOURCES} +mkdir -p ${BGO_SPACE}/bin/linux_amd64/linux/usr/bin/ +mkdir -p ${BGO_SPACE}/bin/linux_amd64/linux/etc/amazon/xray/ +mkdir -p ${BGO_SPACE}/bin/linux_amd64/linux/etc/init/ +mkdir -p ${BGO_SPACE}/bin/linux_amd64/linux/etc/systemd/system/ + +echo "Copying application files" +cp ${BGO_SPACE}/build/xray/xray 
${BGO_SPACE}/bin/linux_amd64/linux/usr/bin/ +cp ${BGO_SPACE}/daemon/cfg.yaml ${BGO_SPACE}/bin/linux_amd64/linux/etc/amazon/xray/cfg.yaml +cp ${BGO_SPACE}/Tool/src/packaging/linux/xray.conf ${BGO_SPACE}/bin/linux_amd64/linux/etc/init/ +cp ${BGO_SPACE}/Tool/src/packaging/linux/xray.service ${BGO_SPACE}/bin/linux_amd64/linux/etc/systemd/system/ + +echo "Creating the rpm package" +SPEC_FILE="${BGO_SPACE}/Tool/src/packaging/linux/xray.spec" +BUILD_ROOT="${BGO_SPACE}/bin/linux_amd64/linux" +setarch x86_64 rpmbuild --define "rpmversion `cat ${BGO_SPACE}/VERSION`" --define "_topdir bin/linux_amd64/linux/rpmbuild" -bb --buildroot ${BUILD_ROOT} ${SPEC_FILE} + +echo "Copying rpm files to bin" +cp ${BGO_SPACE}/bin/linux_amd64/linux/rpmbuild/RPMS/x86_64/*.rpm ${BGO_SPACE}/build/xray/aws-xray-daemon-`cat ${BGO_SPACE}/VERSION`.rpm diff --git a/Tool/src/packaging/linux/build_zip_linux.sh b/Tool/src/packaging/linux/build_zip_linux.sh new file mode 100755 index 0000000..96aff70 --- /dev/null +++ b/Tool/src/packaging/linux/build_zip_linux.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +echo "****************************************" +echo "Creating zip file for Linux amd64" +echo "****************************************" + +BUILD_FOLDER=${BGO_SPACE}/build/xray + +if [ -f ${BUILD_FOLDER}/xray-linux.zip ] +then + rm ${BUILD_FOLDER}/xray-linux.zip +fi +cd ${BUILD_FOLDER} +zip aws-xray-daemon-linux-`cat ${BGO_SPACE}/VERSION`.zip xray cfg.yaml diff --git a/Tool/src/packaging/linux/xray.conf b/Tool/src/packaging/linux/xray.conf new file mode 100644 index 0000000..615dbb6 --- /dev/null +++ b/Tool/src/packaging/linux/xray.conf @@ -0,0 +1,12 @@ +# Location /etc/init/xray.conf +# initctl start xray +start on (runlevel [345] and started network) +stop on (runlevel [!345] or stopping network) + +respawn + +script + echo $$ > /var/run/xray.pid + exec su -s /bin/bash -c "xray -f /var/log/xray/xray.log" xray +end script + diff --git a/Tool/src/packaging/linux/xray.service b/Tool/src/packaging/linux/xray.service new file mode 100644 index 0000000..d8e99f0 --- /dev/null +++ b/Tool/src/packaging/linux/xray.service @@ -0,0 +1,16 @@ +# Location: /etc/systemd/system/xray.service +# systemctl enable xray +# systemctl start xray +# systemctl | grep xray +# https://www.freedesktop.org/software/systemd/man/systemd.unit.html + +[Unit] +Description=xray + +[Service] +WorkingDirectory=/usr/bin/ +ExecStart=/bin/su -s /bin/bash -c "xray -f /var/log/xray/xray.log" +KillMode=process + +[Install] +WantedBy=network-online.target diff --git a/Tool/src/packaging/linux/xray.spec b/Tool/src/packaging/linux/xray.spec new file mode 100644 index 0000000..60b4686 --- /dev/null +++ b/Tool/src/packaging/linux/xray.spec @@ -0,0 +1,64 @@ +Name: xray +Version: %rpmversion +Release: 1 +Summary: X-Ray daemon + +Group: Amazon/Tools +License: Apache License, Version 2.0 +URL: http://docs.aws.amazon.com/ + +%description +This package provides daemon to send trace segments to xray dataplane + +%files +/usr/bin/xray + +%config(noreplace) /etc/amazon/xray/cfg.yaml +%config(noreplace) /etc/init/xray.conf +%config(noreplace) /etc/systemd/system/xray.service + +%pre +# First time install create user and folders +if [ $1 -eq 1 ]; then + useradd --system -s /bin/false xray + mkdir -m 755 /var/log/xray/ + chown xray /var/log/xray/ +else +# Stop the agent before the upgrade + if [ $1 -ge 2 ]; then + if [[ `/sbin/init --version` =~ upstart ]]; then + /sbin/stop xray + elif [[ `systemctl` =~ -\.mount ]]; then + systemctl stop xray + systemctl daemon-reload + fi + fi +fi + 
+%preun +# Stop the agent after uninstall +if [ $1 -eq 0 ] ; then + if [[ `/sbin/init --version` =~ upstart ]]; then + /sbin/stop xray + sleep 1 + elif [[ `systemctl` =~ -\.mount ]]; then + systemctl stop xray + systemctl daemon-reload + fi +fi + +%posttrans +# Start the agent after initial install or upgrade +if [ $1 -ge 0 ]; then + if [[ `/sbin/init --version` =~ upstart ]]; then + /sbin/start xray + elif [[ `systemctl` =~ -\.mount ]]; then + systemctl start xray + systemctl daemon-reload + fi +fi + +%clean +# rpmbuild deletes $buildroot after building, specifying clean section to make +# sure it is not deleted + diff --git a/Tool/src/packaging/osx/build_zip_osx.sh b/Tool/src/packaging/osx/build_zip_osx.sh new file mode 100755 index 0000000..694b85e --- /dev/null +++ b/Tool/src/packaging/osx/build_zip_osx.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +echo "****************************************" +echo "Creating zip file for OS-X amd64" +echo "****************************************" + +BUILD_FOLDER=${BGO_SPACE}/build/xray + +if [ -f ${BUILD_FOLDER}/xray-osx.zip ] +then + rm ${BUILD_FOLDER}/xray-osx.zip +fi +cd ${BUILD_FOLDER} +zip aws-xray-daemon-macos-`cat ${BGO_SPACE}/VERSION`.zip xray_mac cfg.yaml diff --git a/Tool/src/packaging/windows/build_zip_win.sh b/Tool/src/packaging/windows/build_zip_win.sh new file mode 100755 index 0000000..987f0ae --- /dev/null +++ b/Tool/src/packaging/windows/build_zip_win.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +echo "****************************************" +echo "Creating zip file for Windows amd64" +echo "****************************************" + +BUILD_FOLDER=${BGO_SPACE}/build/xray + +echo "Constructing the zip package" + +if [ -f ${BUILD_FOLDER}/aws-xray-daemon-windows-service-`cat ${BGO_SPACE}/VERSION`.zip ] +then + rm ${BUILD_FOLDER}/aws-xray-daemon-windows-service-`cat ${BGO_SPACE}/VERSION`.zip +fi + +if [ -f ${BUILD_FOLDER}/aws-xray-daemon-windows-process-`cat ${BGO_SPACE}/VERSION`.zip ] +then + rm ${BUILD_FOLDER}/aws-xray-daemon-windows-process-`cat ${BGO_SPACE}/VERSION`.zip +fi + +cd ${BUILD_FOLDER} +zip aws-xray-daemon-windows-service-`cat ${BGO_SPACE}/VERSION`.zip xray.exe cfg.yaml +zip aws-xray-daemon-windows-process-`cat ${BGO_SPACE}/VERSION`.zip xray_windows.exe cfg.yaml diff --git a/Tool/src/versiongenerator/version-gen.go b/Tool/src/versiongenerator/version-gen.go new file mode 100644 index 0000000..b103e48 --- /dev/null +++ b/Tool/src/versiongenerator/version-gen.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "path/filepath" + "github.com/aws/aws-xray-daemon/daemon/conn" +) + +// version-gen is a simple program that generates the daemon version number and writes to VERSION file. +func main() { + + fmt.Printf("AWS X-Ray daemon version: %v\n", conn.GetVersionNumber()) + + // Write X-Ray daemon version to VERSION file. + if err := ioutil.WriteFile(filepath.Join("VERSION"), []byte(conn.GetVersionNumber()), 0600); err != nil { + log.Fatalf("Error writing to VERSION file. %v", err) + } +} diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..50aea0e --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +2.1.0 \ No newline at end of file diff --git a/daemon/bufferpool/bufferpool.go b/daemon/bufferpool/bufferpool.go new file mode 100644 index 0000000..fe9b30c --- /dev/null +++ b/daemon/bufferpool/bufferpool.go @@ -0,0 +1,111 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package bufferpool + +import ( + "errors" + "math" + "sync" +) + +// BufferPool is a structure for storing trace segments. +type BufferPool struct { + // Slice of byte slices to store trace segments. + Buffers []*[]byte + lock sync.Mutex + + // Map to track available buffers in the pool. + bufferHeadHash map[*byte]bool +} + +// Init initializes new BufferPool with bufferLimit buffers, each of bufferSize. +func Init(bufferLimit int, bufferSize int) *BufferPool { + bufferHeadHash := make(map[*byte]bool) + bufferArray := make([]*[]byte, bufferLimit) + for i := 0; i < bufferLimit; i++ { + buf := make([]byte, bufferSize) + bufferArray[i] = &buf + bufferHeadHash[getBufferPointer(&buf)] = true + } + bufferPool := BufferPool{ + Buffers: bufferArray, + lock: sync.Mutex{}, + bufferHeadHash: bufferHeadHash, + } + return &bufferPool +} + +// Get returns available buffer of BufferPool b, nil if not any. +func (b *BufferPool) Get() *[]byte { + b.lock.Lock() + buffers := b.Buffers + buffersLen := len(buffers) + var buf *[]byte + if buffersLen > 0 { + buf = buffers[buffersLen-1] + b.Buffers = buffers[:buffersLen-1] + delete(b.bufferHeadHash, getBufferPointer(buf)) + } + b.lock.Unlock() + return buf +} + +// Return adds buffer buf to BufferPool b. +func (b *BufferPool) Return(buf *[]byte) { + b.lock.Lock() + // Rejecting buffer if already in pool + if b.isBufferAlreadyInPool(buf) { + b.lock.Unlock() + return + } + buffers := b.Buffers + buffersCap := cap(buffers) + buffersLen := len(buffers) + if buffersLen < buffersCap { + buffers = append(buffers, buf) + b.Buffers = buffers + b.bufferHeadHash[getBufferPointer(buf)] = true + } + b.lock.Unlock() +} + +// CurrentBuffersLen returns length of buffers. +func (b *BufferPool) CurrentBuffersLen() int { + b.lock.Lock() + len := len(b.Buffers) + b.lock.Unlock() + return len +} + +func getBufferPointer(buf *[]byte) *byte { + bufVal := *buf + // Using first element as pointer to the whole array as Go array is continuous array + // This might fail if someone return slice of original buffer that was fetched + return &bufVal[0] +} + +func (b *BufferPool) isBufferAlreadyInPool(buf *[]byte) bool { + bufPointer := getBufferPointer(buf) + _, ok := b.bufferHeadHash[bufPointer] + return ok +} + +// GetPoolBufferCount returns number of buffers that can fit in the given buffer pool limit +// where each buffer is of size receiveBufferSize. +func GetPoolBufferCount(bufferPoolLimitMB int, receiveBufferSize int) (int, error) { + if receiveBufferSize <= 0 { + return 0, errors.New("receive buffer size cannot be less than or equal to zero") + } + if bufferPoolLimitMB <= 0 { + return 0, errors.New("process limit MB cannot be less than or equal to zero") + } + processLimitBytes := bufferPoolLimitMB * 1024 * 1024 + return int(math.Floor(float64(processLimitBytes / receiveBufferSize))), nil +} diff --git a/daemon/bufferpool/bufferpool_test.go b/daemon/bufferpool/bufferpool_test.go new file mode 100644 index 0000000..a43b79a --- /dev/null +++ b/daemon/bufferpool/bufferpool_test.go @@ -0,0 +1,158 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. 
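As a brief aside before the tests: the following is an illustrative usage sketch of the bufferpool package added above (not part of the patch). The import path is inferred from the repository layout, and the 16 MB memory budget is a hypothetical value.

```go
package main

import (
	"log"

	"github.com/aws/aws-xray-daemon/daemon/bufferpool"
)

func main() {
	const bufferSize = 256 * 1024 // 256 KB per receive buffer, matching the tests below
	const poolLimitMB = 16        // hypothetical memory budget for the pool

	// Size the pool from the memory budget, then pre-allocate the buffers.
	count, err := bufferpool.GetPoolBufferCount(poolLimitMB, bufferSize)
	if err != nil {
		log.Fatalf("cannot size buffer pool: %v", err)
	}
	pool := bufferpool.Init(count, bufferSize)

	buf := pool.Get() // returns nil when the pool is exhausted
	if buf == nil {
		log.Fatal("no buffer available")
	}
	// ... read a UDP packet into *buf ...
	pool.Return(buf) // hand the buffer back; buffers already in the pool are rejected

	log.Printf("buffers available: %d of %d", pool.CurrentBuffersLen(), count)
}
```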
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package bufferpool + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" +) + +type bufferPoolTestCase struct { + processorSizeMB int + bufferSizeKB int +} + +func TestBufferPoolGet(t *testing.T) { + testCases := []int{10, 200, 1000, 5000, 10000} + for _, bufferLimit := range testCases { + + bufferSize := 256 * 1024 + + bufferPool := Init(bufferLimit, bufferSize) + + // First Fetch + buf := bufferPool.Get() + + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1) + assert.NotNil(t, buf) + + // Try to get all. Minus 1 due to fetch above + for i := 0; i < bufferLimit-1; i++ { + buf = bufferPool.Get() + + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1-(i+1)) + assert.NotNil(t, buf) + } + + // No more buffer left hence returned nil + buf = bufferPool.Get() + + assert.Nil(t, buf) + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), 0) + } +} + +func TestBufferReturn(t *testing.T) { + bufferLimit := 10 + bufferSize := 256 * 1024 + bufferPool := Init(bufferLimit, bufferSize) + buf := make([]byte, bufferSize) + + bufferPool.Return(&buf) + + // This return should be rejected as pool is already full + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit) + + // Fetch one and return buffer + bufferPool.Get() + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1) + + bufferPool.Return(&buf) + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit) + + // Fetch two and return same buffer returned before which should be rejected + returnedBuf1 := bufferPool.Get() + returnedBuf2 := bufferPool.Get() + + assert.NotNil(t, returnedBuf1) + assert.NotNil(t, returnedBuf2) + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-2) + + bufferPool.Return(returnedBuf1) + bufferPool.Return(returnedBuf1) + + assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1) +} + +func TestBufferGetMultipleRoutine(t *testing.T) { + testCases := []int{100, 1000, 2132} + for _, bufferLimit := range testCases { + bufferSize := 256 * 1024 + routines := 5 + pool := Init(bufferLimit, bufferSize) + + routineFunc := func(c chan int, pool *BufferPool) { + count := 0 + for { + buf := pool.Get() + if buf == nil { + break + } + count++ + } + c <- count + } + chans := make([]chan int, routines) + for i := 0; i < routines; i++ { + c := make(chan int) + chans[i] = c + go routineFunc(c, pool) + } + + totalFetched := 0 + for i := 0; i < routines; i++ { + bufFetched := <-chans[i] + totalFetched += bufFetched + } + + assert.EqualValues(t, bufferLimit, totalFetched) + buf := pool.Get() + assert.Nil(t, buf) + } +} + +func TestGetPoolBufferCount(t *testing.T) { + testCases := []bufferPoolTestCase{ + {processorSizeMB: 100, bufferSizeKB: 256}, + {processorSizeMB: 16, bufferSizeKB: 125}, + {processorSizeMB: 16, bufferSizeKB: 256}, + {processorSizeMB: 250, bufferSizeKB: 512}, + {processorSizeMB: 5, bufferSizeKB: 50}, + } + + for _, testCase := range testCases { + processSizeMB := testCase.processorSizeMB + bufferSize := 
testCase.bufferSizeKB + + bufferCount, err := GetPoolBufferCount(processSizeMB, bufferSize) + + assert.Nil(t, err) + expected := int(math.Floor(float64((processSizeMB * 1024 * 1024) / bufferSize))) + assert.EqualValues(t, expected, bufferCount) + } +} + +func TestGetPoolBufferCountNegativeProcessorSize(t *testing.T) { + bufferCount, err := GetPoolBufferCount(-123, 24512) + + assert.EqualValues(t, 0, bufferCount) + assert.NotNil(t, err) + assert.EqualValues(t, err.Error(), "process limit MB cannot be less than or equal to zero") +} + +func TestGetPoolBufferCountNegativeBufferSize(t *testing.T) { + bufferCount, err := GetPoolBufferCount(123, -24512) + + assert.EqualValues(t, 0, bufferCount) + assert.NotNil(t, err) + assert.EqualValues(t, err.Error(), "receive buffer size cannot be less than or equal to zero") +} diff --git a/daemon/cfg.yaml b/daemon/cfg.yaml new file mode 100644 index 0000000..f85c740 --- /dev/null +++ b/daemon/cfg.yaml @@ -0,0 +1,29 @@ +# Maximum buffer size in MB (minimum 3). Choose 0 to use 1% of host memory. +TotalBufferSizeMB: 0 +# Maximum number of concurrent calls to AWS X-Ray to upload segment documents. +Concurrency: 8 +# Send segments to AWS X-Ray service in a specific region +Region: "" +# Change the X-Ray service endpoint to which the daemon sends segment documents. +Endpoint: "" +Socket: + # Change the address and port on which the daemon listens for UDP packets containing segment documents. + UDPAddress: "127.0.0.1:2000" +Logging: + LogRotation: true + # Change the log level, from most verbose to least: dev, debug, info, warn, error, prod (default). + LogLevel: "prod" + # Output logs to the specified file path. + LogPath: "" +# Turn on local mode to skip EC2 instance metadata check. +LocalMode: false +# Amazon Resource Name (ARN) of the AWS resource running the daemon. +ResourceARN: "" +# Assume an IAM role to upload segments to a different account. +RoleARN: "" +# Disable TLS certificate verification. +NoVerifySSL: false +# Upload segments to AWS X-Ray through a proxy. +ProxyAddress: "" +# Daemon configuration file format version. +Version: 1 \ No newline at end of file diff --git a/daemon/cfg/cfg.go b/daemon/cfg/cfg.go new file mode 100644 index 0000000..52435ac --- /dev/null +++ b/daemon/cfg/cfg.go @@ -0,0 +1,359 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package cfg + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + + "gopkg.in/yaml.v2" + + log "github.com/cihub/seelog" +) + +var configLocations = []string{ + "/etc/amazon/xray/cfg.yaml", + "cfg.yaml", + "github.com/aws/aws-xray-daemon/daemon/cfg.yaml", +} + +// LogFile represents log file passed through command line argument. +var LogFile string + +// LogFormat defines format for logger. +var LogFormat = "%Date(2006-01-02T15:04:05Z07:00) [%Level] %Msg%n" + +// Config defines configuration structure for cli parameters. +type Config struct { + // Maximum buffer size in MB (minimum 3). Choose 0 to use 1% of host memory. 
+ TotalBufferSizeMB int `yaml:"TotalBufferSizeMB"` + + // Maximum number of concurrent calls to AWS X-Ray to upload segment documents. + Concurrency int `yaml:"Concurrency"` + + // X-Ray service endpoint to which the daemon sends segment documents. + Endpoint string `yaml:"Endpoint"` + + // Send segments to AWS X-Ray service in a specific region. + Region string `yaml:"Region"` + + Socket struct { + // Address and port on which the daemon listens for UDP packets containing segment documents. + UDPAddress string `yaml:"UDPAddress"` + } `yaml:"Socket"` + + // Structure for logging. + Logging struct { + // LogRotation, if true, will rotate log after 50 MB size of current log file. + LogRotation bool `yaml:"LogRotation"` + // The log level, from most verbose to least: dev, debug, info, warn, error, prod (default). + LogLevel string `yaml:"LogLevel"` + // Logs to the specified file path. + LogPath string `yaml:"LogPath"` + } `yaml:"Logging"` + + // Local mode to skip EC2 instance metadata check. + LocalMode bool `yaml:"LocalMode"` + + // Amazon Resource Name (ARN) of the AWS resource running the daemon. + ResourceARN string `yaml:"ResourceARN"` + + // IAM role to upload segments to a different account. + RoleARN string `yaml:"RoleARN"` + + // Enable or disable TLS certificate verification. + NoVerifySSL bool `yaml:"NoVerifySSL"` + + // Upload segments to AWS X-Ray through a proxy. + ProxyAddress string `yaml:"ProxyAddress"` + + // Daemon configuration file format version. + Version int `yaml:"Version"` +} + +// DefaultConfig returns default configuration for X-Ray daemon. +func DefaultConfig() *Config { + return &Config{ + TotalBufferSizeMB: 0, + Concurrency: 8, + Endpoint: "", + Region: "", + Socket: struct { + UDPAddress string `yaml:"UDPAddress"` + }{ + UDPAddress: "127.0.0.1:2000", + }, + Logging: struct { + LogRotation bool `yaml:"LogRotation"` + LogLevel string `yaml:"LogLevel"` + LogPath string `yaml:"LogPath"` + }{ + LogRotation: true, + LogLevel: "prod", + LogPath: "", + }, + LocalMode: false, + ResourceARN: "", + RoleARN: "", + NoVerifySSL: false, + ProxyAddress: "", + Version: 1, + } +} + +// ParameterConfig is a configuration used by daemon. +type ParameterConfig struct { + SegmentChannel struct { + // Size of trace segments channel. + Std int + } + + Socket struct { + // Socket buffer size. + BufferSizeKB int + } + + // Number of go routines daemon.poll() to spawn. + ReceiverRoutines int + + Processor struct { + // Size of the batch segments processed by Processor. + BatchSize int + + // Idle timeout in milliseconds used while sending batch segments. + IdleTimeoutMillisecond int + + // MaxIdleConnPerHost, controls the maximum idle + // (keep-alive) HTTP connections to keep per-host. + MaxIdleConnPerHost int + + // Used to set Http client timeout in seconds. + RequestTimeout int + BatchProcessorQueueSize int + } +} + +// ParameterConfigValue returns instance of ParameterConfig, initialized with default values. +var ParameterConfigValue = &ParameterConfig{ + SegmentChannel: struct { + Std int + }{ + Std: 250, + }, + Socket: struct { + BufferSizeKB int + }{ + BufferSizeKB: 64, + }, + ReceiverRoutines: 2, + Processor: struct { + BatchSize int + IdleTimeoutMillisecond int + MaxIdleConnPerHost int + RequestTimeout int + BatchProcessorQueueSize int + }{ + BatchSize: 50, + IdleTimeoutMillisecond: 1000, + MaxIdleConnPerHost: 8, + RequestTimeout: 2, + BatchProcessorQueueSize: 20, + }, +} + +// LoadConfig returns configuration from a valid configFile else default configuration. 
+func LoadConfig(configFile string) *Config { + if configFile == "" { + for _, val := range configLocations { + if _, err := os.Stat(val); os.IsNotExist(err) { + continue + } + return merge(val) + } + return DefaultConfig() + } + return merge(configFile) +} + +func loadConfigFromFile(configPath string) *Config { + bytes, err := ioutil.ReadFile(configPath) + if err != nil { + errorAndExit("", err) + } + return loadConfigFromBytes(bytes) +} + +func loadConfigFromBytes(bytes []byte) *Config { + c := &Config{} + err := yaml.Unmarshal(bytes, c) + if err != nil { + errorAndExit("", err) + } + return c +} + +func errorAndExit(serr string, err error) { + createLogWritersAndLog(serr, err) + rescueStderr := os.Stderr + _, w, _ := os.Pipe() + os.Stderr = w + + w.Close() + os.Stderr = rescueStderr + os.Exit(1) +} + +// createLogWritersAndLog writes to stderr and provided log file. +func createLogWritersAndLog(serr string, err error) { + var stderrWriter = os.Stderr + var writer io.Writer + + stderrLogger, _ := log.LoggerFromWriterWithMinLevelAndFormat(stderrWriter, log.ErrorLvl, LogFormat) + writeToLogger(stderrLogger, serr, err) + + if LogFile == "" { + return + } + writer, _ = log.NewFileWriter(LogFile) + fileLogger, _ := log.LoggerFromWriterWithMinLevelAndFormat(writer, log.ErrorLvl, LogFormat) + writeToLogger(fileLogger, serr, err) +} + +func writeToLogger(fileLogger log.LoggerInterface, serr string, err error) { + log.ReplaceLogger(fileLogger) + if serr != "" { + log.Errorf("%v", serr) + } else if err != nil { + log.Errorf("Error occur when using config flag: %v", err) + } +} + +func configFlagArray(config yaml.MapSlice) []string { + var configArray []string + for i := 0; i < len(config); i++ { + if config[i].Value == nil || reflect.TypeOf(config[i].Value).String() != "yaml.MapSlice" { + configArray = append(configArray, fmt.Sprint(config[i].Key)) + } else { + configItem := yaml.MapSlice{} + configItem = config[i].Value.(yaml.MapSlice) + for j := 0; j < len(configItem); j++ { + configArray = append(configArray, fmt.Sprintf("%v.%v", config[i].Key, configItem[j].Key)) + } + } + } + return configArray +} + +func validConfigArray() []string { + validConfig := yaml.MapSlice{} + validConfigBytes, verr := yaml.Marshal(DefaultConfig()) + if verr != nil { + errorAndExit("", verr) + } + yerr := yaml.Unmarshal(validConfigBytes, &validConfig) + if yerr != nil { + errorAndExit("", yerr) + } + return configFlagArray(validConfig) +} + +func userConfigArray(configPath string) []string { + fileBytes, rerr := ioutil.ReadFile(configPath) + if rerr != nil { + errorAndExit("", rerr) + } + userConfig := yaml.MapSlice{} + uerr := yaml.Unmarshal(fileBytes, &userConfig) + if uerr != nil { + errorAndExit("", uerr) + } + return configFlagArray(userConfig) +} + +// ConfigValidation validates provided configuration file, invalid configuration will exit the process. +func ConfigValidation(configPath string) { + validConfigArray := validConfigArray() + userConfigArray := userConfigArray(configPath) + + notSupportFlag := []string{"Profile.CPU", "Profile.Memory", "Socket.BufferSizeKB", "Logging.LogFormat", "Processor.BatchProcessorQueueSize"} + needMigrateFlag := []string{"LogRotation", "Processor.Region", "Processor.Endpoint", "Processor.Routine", "MemoryLimit"} + for i := 0; i < len(userConfigArray); i++ { + if !contains(userConfigArray, "Version") { + errorAndExit("Config Version is missing. Use X-Ray Daemon Config Migration Script to update the config file. 
Please refer to AWS X-Ray Documentation for more information.", nil) + } + if !contains(validConfigArray, userConfigArray[i]) { + if contains(notSupportFlag, userConfigArray[i]) { + errorMessage := fmt.Sprintf("%v flag is not supported any more. Please refer to AWS X-Ray Documentation for more information.", userConfigArray[i]) + errorAndExit(errorMessage, nil) + } else if contains(needMigrateFlag, userConfigArray[i]) { + errorMessage := fmt.Sprintf("%v flag is not supported. Use X-Ray Daemon Config Migration Script to update the config file. Please refer to AWS X-Ray Documentation for more information.", userConfigArray[i]) + errorAndExit(errorMessage, nil) + } else { + errorMessage := fmt.Sprintf("%v flag is invalid. Please refer to AWS X-Ray Documentation for more information.", userConfigArray[i]) + errorAndExit(errorMessage, nil) + } + } + } +} + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +func merge(configFile string) *Config { + userConfig := loadConfigFromFile(configFile) + if userConfig.Version != 1 { + errorAndExit("Config Version Setting is not correct. Use X-Ray Daemon Config Migration Script to update the config file. Please refer to AWS X-Ray Documentation for more information.", nil) + } + + userConfig.Socket.UDPAddress = getStringValue(userConfig.Socket.UDPAddress, DefaultConfig().Socket.UDPAddress) + userConfig.TotalBufferSizeMB = getIntValue(userConfig.TotalBufferSizeMB, DefaultConfig().TotalBufferSizeMB) + userConfig.ResourceARN = getStringValue(userConfig.ResourceARN, DefaultConfig().ResourceARN) + userConfig.RoleARN = getStringValue(userConfig.RoleARN, DefaultConfig().RoleARN) + userConfig.Concurrency = getIntValue(userConfig.Concurrency, DefaultConfig().Concurrency) + userConfig.Endpoint = getStringValue(userConfig.Endpoint, DefaultConfig().Endpoint) + userConfig.Region = getStringValue(userConfig.Region, DefaultConfig().Region) + userConfig.Logging.LogRotation = getBoolValue(userConfig.Logging.LogRotation, DefaultConfig().Logging.LogRotation) + userConfig.Logging.LogLevel = getStringValue(userConfig.Logging.LogLevel, DefaultConfig().Logging.LogLevel) + userConfig.Logging.LogPath = getStringValue(userConfig.Logging.LogPath, DefaultConfig().Logging.LogPath) + userConfig.NoVerifySSL = getBoolValue(userConfig.NoVerifySSL, DefaultConfig().NoVerifySSL) + userConfig.LocalMode = getBoolValue(userConfig.LocalMode, DefaultConfig().LocalMode) + userConfig.ProxyAddress = getStringValue(userConfig.ProxyAddress, DefaultConfig().ProxyAddress) + return userConfig +} + +func getStringValue(configValue string, defaultValue string) string { + if configValue == "" { + return defaultValue + } + return configValue +} + +func getIntValue(configValue, defaultValue int) int { + if configValue == 0 { + return defaultValue + } + return configValue +} + +func getBoolValue(configValue, defaultValue bool) bool { + if configValue == false { + return defaultValue + } + return configValue +} diff --git a/daemon/cfg/cfg_test.go b/daemon/cfg/cfg_test.go new file mode 100644 index 0000000..12e51ee --- /dev/null +++ b/daemon/cfg/cfg_test.go @@ -0,0 +1,625 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package cfg + +import ( + "errors" + "io/ioutil" + "os" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +var errFile = "error.log" +var tstFileName = "test_config.yaml" +var tstFilePath string + +func setupTestCase() { + LogFile = errFile +} + +func tearTestCase() { + LogFile = "" + os.Remove(errFile) +} + +func setupTestFile(cnfg string) (string, error) { + goPath := os.Getenv("PWD") + if goPath == "" { + panic("GOPATH not set") + } + tstFilePath = goPath + "/" + tstFileName + f, err := os.Create(tstFilePath) + if err != nil { + panic(err) + } + f.WriteString(cnfg) + f.Close() + return goPath, err +} + +func clearTestFile() { + os.Remove(tstFilePath) +} +func TestLoadConfigFromBytes(t *testing.T) { + configString := + `Socket: + UDPAddress: "127.0.0.1:2000" +TotalBufferSizeMB: 16 +Region: "us-east-1" +Endpoint: "https://xxxx.xxxx.com" +ResourceARN: "" +RoleARN: "" +Concurrency: 8 +Logging: + LogRotation: true + LogPath: "" + LogLevel: "prod" +NoVerifySSL: false +LocalMode: false +ProxyAddress: "" +Version: 1` + + c := loadConfigFromBytes([]byte(configString)) + + assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:2000") + assert.EqualValues(t, c.TotalBufferSizeMB, 16) + assert.EqualValues(t, c.Region, "us-east-1") + assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com") + assert.EqualValues(t, c.ResourceARN, "") + assert.EqualValues(t, c.RoleARN, "") + assert.EqualValues(t, c.Concurrency, 8) + assert.EqualValues(t, c.Logging.LogLevel, "prod") + assert.EqualValues(t, c.Logging.LogPath, "") + assert.EqualValues(t, c.Logging.LogRotation, true) + assert.EqualValues(t, c.NoVerifySSL, false) + assert.EqualValues(t, c.LocalMode, false) + assert.EqualValues(t, c.ProxyAddress, "") + assert.EqualValues(t, c.Version, 1) +} + +func TestLoadConfigFromBytesTypeError(t *testing.T) { + configString := + `TotalBufferSizeMB: NotExist` + + // Only run the failing part when a specific env variable is set + if os.Getenv("Test_Bytes") == "1" { + loadConfigFromBytes([]byte(configString)) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestLoadConfigFromBytesTypeError") + cmd.Env = append(os.Environ(), "Test_Bytes=1") + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + // Check that the program exited + err := cmd.Wait() + if e, ok := err.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } +} + +func TestLoadConfigFromFile(t *testing.T) { + configString := + `Socket: + UDPAddress: "127.0.0.1:2000" +TotalBufferSizeMB: 16 +Region: "us-east-1" +Endpoint: "https://xxxx.xxxx.com" +ResourceARN: "" +RoleARN: "" +Concurrency: 8 +Logging: + LogRotation: true + LogPath: "" + LogLevel: "prod" +NoVerifySSL: false +LocalMode: false +ProxyAddress: "" +Version: 1` + setupTestFile(configString) + + c := loadConfigFromFile(tstFilePath) + + assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:2000") + assert.EqualValues(t, c.TotalBufferSizeMB, 16) + assert.EqualValues(t, c.Region, "us-east-1") + assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com") + assert.EqualValues(t, c.ResourceARN, "") + assert.EqualValues(t, c.RoleARN, "") + assert.EqualValues(t, c.Concurrency, 8) + assert.EqualValues(t, c.Logging.LogLevel, "prod") + 
assert.EqualValues(t, c.Logging.LogPath, "") + assert.EqualValues(t, c.Logging.LogRotation, true) + assert.EqualValues(t, c.NoVerifySSL, false) + assert.EqualValues(t, c.LocalMode, false) + assert.EqualValues(t, c.ProxyAddress, "") + assert.EqualValues(t, c.Version, 1) + + clearTestFile() +} + +func TestLoadConfigFromFileDoesNotExist(t *testing.T) { + setupTestCase() + testFile := "test_config_does_not_exist_121213.yaml" + + // Only run the failing part when a specific env variable is set + if os.Getenv("Test_Bytes") == "1" { + loadConfigFromFile(testFile) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestLoadConfigFromFileDoesNotExist") + cmd.Env = append(os.Environ(), "Test_Bytes=1") + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + // Check that the program exited + err := cmd.Wait() + if e, ok := err.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + tearTestCase() +} + +func TestLoadConfig(t *testing.T) { + configString := + `Socket: + UDPAddress: "127.0.0.1:2000" +TotalBufferSizeMB: 16 +Region: "us-east-1" +Endpoint: "https://xxxx.xxxx.com" +ResourceARN: "" +RoleARN: "" +Concurrency: 8 +Logging: + LogRotation: true + LogPath: "" + LogLevel: "prod" +NoVerifySSL: false +LocalMode: false +ProxyAddress: "" +Version: 1` + setupTestFile(configString) + configLocations = append(configLocations, tstFilePath) + + c := LoadConfig("") + + assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:2000") + assert.EqualValues(t, c.TotalBufferSizeMB, 16) + assert.EqualValues(t, c.Region, "us-east-1") + assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com") + assert.EqualValues(t, c.ResourceARN, "") + assert.EqualValues(t, c.RoleARN, "") + assert.EqualValues(t, c.Concurrency, 8) + assert.EqualValues(t, c.Logging.LogLevel, "prod") + assert.EqualValues(t, c.Logging.LogPath, "") + assert.EqualValues(t, c.Logging.LogRotation, true) + assert.EqualValues(t, c.NoVerifySSL, false) + assert.EqualValues(t, c.LocalMode, false) + assert.EqualValues(t, c.ProxyAddress, "") + assert.EqualValues(t, c.Version, 1) + clearTestFile() +} + +func TestLoadConfigFileNotPresent(t *testing.T) { + configLocations = []string{"test_config_does_not_exist_989078070.yaml"} + + c := LoadConfig("") + + assert.NotNil(t, c) + // If files config files are not present return default config + assert.EqualValues(t, DefaultConfig(), c) +} + +func TestMergeUserConfigWithDefaultConfig(t *testing.T) { + configString := + `Socket: + UDPAddress: "127.0.0.1:3000" +TotalBufferSizeMB: 8 +Region: "us-east-2" +Endpoint: "https://xxxx.xxxx.com" +ResourceARN: "" +RoleARN: "" +Concurrency: 8 +Version: 1` + setupTestFile(configString) + c := merge(tstFilePath) + + assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:3000") + assert.EqualValues(t, c.TotalBufferSizeMB, 8) + assert.EqualValues(t, c.Region, "us-east-2") + assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com") + assert.EqualValues(t, c.ResourceARN, "") + assert.EqualValues(t, c.RoleARN, "") + assert.EqualValues(t, c.Concurrency, 8) + assert.EqualValues(t, c.Logging.LogLevel, "prod") + assert.EqualValues(t, c.Logging.LogPath, "") + assert.EqualValues(t, c.Logging.LogRotation, true) + assert.EqualValues(t, c.NoVerifySSL, false) + assert.EqualValues(t, c.LocalMode, false) + assert.EqualValues(t, c.ProxyAddress, "") + assert.EqualValues(t, c.Version, 1) + clearTestFile() +} + +func TestConfigVersionNotSet(t *testing.T) { + setupTestCase() + configString := 
+ `Socket: + UDPAddress: "127.0.0.1:3000" +TotalBufferSizeMB: 8 +Region: "us-east-2" +Endpoint: "https://xxxx.xxxx.com" +ResourceARN: "" +RoleARN: "" +Concurrency: 8` + + goPath, err := setupTestFile(configString) + + // Only run the failing part when a specific env variable is set + if os.Getenv("TEST_CONFIG_VERSION_NOT_SET") == "1" { + ConfigValidation(tstFilePath) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestConfigVersionNotSet") + cmd.Env = append(os.Environ(), "TEST_CONFIG_VERSION_NOT_SET=1") + if cmdErr := cmd.Start(); cmdErr != nil { + t.Fatal(cmdErr) + } + + // Check that the program exited + error := cmd.Wait() + if e, ok := error.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + + // Check if the log message is what we expected + if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) { + t.Fatal(logErr) + } + gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + expected := "Config Version is missing." + if !strings.Contains(got, expected) { + t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected) + } + clearTestFile() + tearTestCase() +} + +func TestUseMemoryLimitInConfig(t *testing.T) { + setupTestCase() + configString := + `Socket: + UDPAddress: "127.0.0.1:3000" +MemoryLimit: 8 +Region: "us-east-2" +Endpoint: "https://xxxx.xxxx.com" +ResourceARN: "" +RoleARN: "" +Concurrency: 8 +Version: 1` + + goPath, err := setupTestFile(configString) + + // Only run the failing part when a specific env variable is set + if os.Getenv("TEST_USE_MEMORYLIMIT_FLAG") == "1" { + ConfigValidation(tstFilePath) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestUseMemoryLimitInConfig") + cmd.Env = append(os.Environ(), "TEST_USE_MEMORYLIMIT_FLAG=1") + if cmdErr := cmd.Start(); cmdErr != nil { + t.Fatal(cmdErr) + } + + // Check that the program exited + error := cmd.Wait() + if e, ok := error.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + + // Check if the log message is what we expected + if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) { + t.Fatal(logErr) + } + gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + expected := "MemoryLimit flag is not supported." + if !strings.Contains(got, expected) { + t.Fatalf("Unexpected log message. 
Got %s but should contain %s", got, expected) + } + clearTestFile() + tearTestCase() +} + +func TestConfigValidationForNotSupportFlags(t *testing.T) { + setupTestCase() + configString := + `Socket: + BufferSizeKB: 128 +Version: 1` + + goPath, err := setupTestFile(configString) + + // Only run the failing part when a specific env variable is set + if os.Getenv("TEST_NOT_SUPPORT_FLAG") == "1" { + ConfigValidation(tstFilePath) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestConfigValidationForNotSupportFlags") + cmd.Env = append(os.Environ(), "TEST_NOT_SUPPORT_FLAG=1") + if cmdErr := cmd.Start(); cmdErr != nil { + t.Fatal(cmdErr) + } + + // Check that the program exited + error := cmd.Wait() + if e, ok := error.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + + // Check if the log message is what we expected + if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) { + t.Fatal(logErr) + } + gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + expected := "Socket.BufferSizeKB flag is not supported any more." + if !strings.Contains(got, expected) { + t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected) + } + clearTestFile() + tearTestCase() +} + +func TestConfigValidationForNeedMigrationFlag(t *testing.T) { + setupTestCase() + configString := + `Processor: + Region: "" +Version: 1` + + goPath, err := setupTestFile(configString) + + // Only run the failing part when a specific env variable is set + if os.Getenv("TEST_NEED_MIGRATION_FLAG") == "1" { + ConfigValidation(tstFilePath) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestConfigValidationForNeedMigrationFlag") + cmd.Env = append(os.Environ(), "TEST_NEED_MIGRATION_FLAG=1") + if cmdErr := cmd.Start(); cmdErr != nil { + t.Fatal(cmdErr) + } + + // Check that the program exited + error := cmd.Wait() + if e, ok := error.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + + // Check if the log message is what we expected + if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) { + t.Fatal(logErr) + } + gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + expected := "Processor.Region flag is not supported. Use X-Ray Daemon Config Migration Script to update the config file." + if !strings.Contains(got, expected) { + t.Fatalf("Unexpected log message. 
Got %s but should contain %s", got, expected) + } + clearTestFile() + tearTestCase() +} + +func TestConfigValidationForInvalidFlag(t *testing.T) { + setupTestCase() + configString := `ABCDE: true +Version: 1` + + goPath := os.Getenv("PWD") + if goPath == "" { + panic("GOPATH not set") + } + testFile := goPath + "/test_config.yaml" + f, err := os.Create(testFile) + if err != nil { + panic(err) + } + f.WriteString(configString) + f.Close() + + // Only run the failing part when a specific env variable is set + if os.Getenv("TEST_INVALID_FLAG") == "1" { + ConfigValidation(testFile) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestConfigValidationForInvalidFlag") + cmd.Env = append(os.Environ(), "TEST_INVALID_FLAG=1") + if cmdErr := cmd.Start(); cmdErr != nil { + t.Fatal(cmdErr) + } + + // Check that the program exited + error := cmd.Wait() + if e, ok := error.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + + // Check if the log message is what we expected + if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) { + t.Fatal(logErr) + } + gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + expected := "ABCDE flag is invalid." + if !strings.Contains(got, expected) { + t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected) + } + os.Remove(testFile) + tearTestCase() +} + +func TestValidConfigArray(t *testing.T) { + validString := []string{"TotalBufferSizeMB", "Concurrency", "Endpoint", "Region", "Socket.UDPAddress", "Logging.LogRotation", "Logging.LogLevel", "Logging.LogPath", + "LocalMode", "ResourceARN", "RoleARN", "NoVerifySSL", "ProxyAddress", "Version"} + testString := validConfigArray() + if len(validString) != len(testString) { + t.Fatalf("Unexpect test array length. Got %v but should be %v", len(testString), len(validString)) + } + for i, v := range validString { + if v != testString[i] { + t.Fatalf("Unexpect Flag in test array. Got %v but should be %v", testString[i], v) + } + } +} + +func TestUserConfigArray(t *testing.T) { + configString := + `Socket: + UDPAddress: "127.0.0.1:3000" +MemoryLimit: 8 +Region: "us-east-2" +Endpoint: "https://xxxx.xxxx.com" +ResourceARN: "" +RoleARN: "" +Version: 1` + + setupTestFile(configString) + + validString := []string{"Socket.UDPAddress", "MemoryLimit", "Region", "Endpoint", "ResourceARN", "RoleARN", "Version"} + testString := userConfigArray(tstFilePath) + if len(validString) != len(testString) { + t.Fatalf("Unexpect test array length. Got %v but should be %v", len(testString), len(validString)) + } + for i, v := range validString { + if v != testString[i] { + t.Fatalf("Unexpect Flag in test array. 
Got %v but should be %v", testString[i], v) + } + } + clearTestFile() +} + +func TestErrorAndExitForGivenString(t *testing.T) { + setupTestCase() + // Only run the failing part when a specific env variable is set + if os.Getenv("TEST_STRING_ERROR") == "1" { + errorAndExit("error occurred", nil) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestErrorAndExitForGivenString") + cmd.Env = append(os.Environ(), "TEST_STRING_ERROR=1") + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + // Check that the program exited + error := cmd.Wait() + if e, ok := error.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", e) + } + + // Check if the log message is what we expected + goPath := os.Getenv("PWD") + if goPath == "" { + panic("GOPATH not set") + } + if _, err := os.Stat(goPath + "/" + errFile); os.IsNotExist(err) { + t.Fatal(err) + } + gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + expected := "error occurred" + if !strings.Contains(got, expected) { + t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected) + } + tearTestCase() +} + +func TestErrorAndExitForGivenError(t *testing.T) { + setupTestCase() + if os.Getenv("TEST_ERROR") == "1" { + err := errors.New("this is an error") + errorAndExit("", err) + return + } + + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestErrorAndExitForGivenError") + cmd.Env = append(os.Environ(), "TEST_ERROR=1") + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + // Check that the program exited + error := cmd.Wait() + if e, ok := error.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", e) + } + + // Check if the log message is what we expected + goPath := os.Getenv("PWD") + if goPath == "" { + panic("GOPATH not set") + } + if _, err := os.Stat(goPath + "/" + errFile); os.IsNotExist(err) { + t.Fatal(err) + } + gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + expected := "this is an error" + if !strings.Contains(got, expected) { + t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected) + } + tearTestCase() +} diff --git a/daemon/cli/cli.go b/daemon/cli/cli.go new file mode 100644 index 0000000..67d31fb --- /dev/null +++ b/daemon/cli/cli.go @@ -0,0 +1,93 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package cli + +import ( + "flag" + "fmt" + "os" +) + +// Flag is used for cli parameters. +type Flag struct { + // A set of flags used for cli configuration. + fs *flag.FlagSet + + // String array used to display flag information on cli. + cliStrings []string +} + +// NewFlag returns a new flag with provided flag name. 
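// A minimal usage sketch (mirroring how daemon.go registers its options; the variable
// names below are illustrative only):
//
//	f := NewFlag("X-Ray Daemon")
//	var bind string
//	var bufferMB int
//	f.StringVarF(&bind, "bind", "b", "127.0.0.1:2000", "Overrides default UDP address (127.0.0.1:2000).")
//	f.IntVarF(&bufferMB, "buffer-memory", "m", 0, "Change the amount of memory in MB that buffers can use (minimum 3).")
//	f.ParseFlags() // accepts both the short (-b, -m) and long (--bind, --buffer-memory) forms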
+func NewFlag(name string) *Flag { + flag := &Flag{ + cliStrings: make([]string, 0, 19), + fs: flag.NewFlagSet(name, flag.ExitOnError), + } + return flag +} + +// IntVarF defines 2 int flags for specified name and shortName with default value, and usage string. +// The argument ptr points to an int variable in which to store the value of the flag. +func (f *Flag) IntVarF(ptr *int, name string, shortName string, value int, usage string) { + f.fs.IntVar(ptr, name, value, usage) + f.fs.IntVar(ptr, shortName, value, usage) + s := fmt.Sprintf("\t-%v\t--%v\t%v", shortName, name, usage) + f.cliStrings = append(f.cliStrings, s) +} + +// StringVarF defines 2 string flags for specified name and shortName, default value, and usage string. +// The argument ptr points to a string variable in which to store the value of the flag. +func (f *Flag) StringVarF(ptr *string, name string, shortName string, value string, usage string) { + f.fs.StringVar(ptr, name, value, usage) + f.fs.StringVar(ptr, shortName, value, usage) + var s string + if len(name) <= 4 { + s = fmt.Sprintf("\t-%v\t--%v\t\t%v", shortName, name, usage) + } else { + s = fmt.Sprintf("\t-%v\t--%v\t%v", shortName, name, usage) + } + f.cliStrings = append(f.cliStrings, s) +} + +// BoolVarF defines 2 bool flags with specified name and shortName, default value, and usage string. +// The argument ptr points to a bool variable in which to store the value of the flag. +func (f *Flag) BoolVarF(ptr *bool, name string, shortName string, value bool, usage string) { + f.fs.BoolVar(ptr, name, value, usage) + f.fs.BoolVar(ptr, shortName, value, usage) + s := fmt.Sprintf("\t-%v\t--%v\t%v", shortName, name, usage) + f.cliStrings = append(f.cliStrings, s) +} + +// Format function formats Flag f for cli display. +func (f *Flag) Format() []string { + var cliDisplay = make([]string, 0, 20) + s := fmt.Sprint("Usage: X-Ray [options]") + cliDisplay = append(cliDisplay, s) + for val := range f.cliStrings { + cliDisplay = append(cliDisplay, f.cliStrings[val]) + } + s = fmt.Sprint("\t-h\t--help\t\tShow this screen") + cliDisplay = append(cliDisplay, s) + return cliDisplay +} + +// ParseFlags parses flag definitions from the command line, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help or -h were set but not defined. +func (f *Flag) ParseFlags() { + f.fs.Usage = func() { + display := f.Format() + for val := range display { + fmt.Println(display[val]) + } + } + f.fs.Parse(os.Args[1:]) +} diff --git a/daemon/cli/cli_test.go b/daemon/cli/cli_test.go new file mode 100644 index 0000000..b8aa62e --- /dev/null +++ b/daemon/cli/cli_test.go @@ -0,0 +1,223 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. 
+ +package cli + +import ( + "math/rand" + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +type CLIArgs struct { + StorageShort []string // store the shorthand flag + StorageLong []string // store the flag name + StorageUsage []string // store the flag usage + StorageFlagInt []int // store the flag int value + StorageFlagString []string // store the flag string value + StorageFlagBool []bool // store the flag bool value +} + +// generate the random string for given length +func RandStr(strSize int) string { + alphaNum := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, strSize) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphaNum[b%byte(len(alphaNum))] + } + return string(bytes) +} + +// store the given number into an variable +func InitialVar(paras []int) []int { + passLen := make([]int, 0, len(paras)) + for i := 0; i < len(paras); i++ { + passLen = append(passLen, paras[i]) + } + return passLen +} + +// mock commandline input +func SetUpInputs(args []string, f *Flag) { + a := os.Args[1:] + if args != nil { + a = args + } + f.fs.Parse(a) +} + +func (cli *CLIArgs) DefineFlagsArray(arrayLen int, strSize []int, strSizeFlag []int) *CLIArgs { + cli.StorageShort = make([]string, 0, arrayLen) + cli.StorageLong = make([]string, 0, arrayLen) + cli.StorageUsage = make([]string, 0, arrayLen) + cli.StorageFlagInt = make([]int, 0, arrayLen) + cli.StorageFlagString = make([]string, 0, arrayLen) + cli.StorageFlagBool = make([]bool, 0, arrayLen) + mShort := make(map[string]bool, arrayLen) + mLong := make(map[string]bool, arrayLen) + mUsage := make(map[string]bool, arrayLen) + for i := 0; i < len(strSize); i++ { + for j := 0; j < arrayLen; j++ { + if strSize[i] == strSizeFlag[0] { + for { + s := RandStr(strSize[i]) + _, ok := mShort[s] + if !ok { + mShort[s] = true + break + } + } + } + if strSize[i] == strSizeFlag[1] { + for { + s := RandStr(strSize[i]) + _, ok := mLong[s] + if !ok { + mLong[s] = true + break + } + } + } + if strSize[i] == strSizeFlag[2] { + for { + s := RandStr(strSize[i]) + _, ok := mUsage[s] + if !ok { + mUsage[s] = true + break + } + } + } + } + } + for k := range mShort { + cli.StorageShort = append(cli.StorageShort, k) + } + for k := range mLong { + cli.StorageLong = append(cli.StorageLong, k) + } + for k := range mUsage { + cli.StorageUsage = append(cli.StorageUsage, k) + } + for i := 0; i < arrayLen; i++ { + cli.StorageFlagInt = append(cli.StorageFlagInt, 0) + } + for i := 0; i < arrayLen; i++ { + cli.StorageFlagString = append(cli.StorageFlagString, "&") + } + for i := 0; i < arrayLen; i++ { + cli.StorageFlagBool = append(cli.StorageFlagBool, true) + } + return cli +} + +func (cli *CLIArgs) InitialFlags(f *Flag) *CLIArgs { + for i := 0; i < 10; i++ { + f.IntVarF(&cli.StorageFlagInt[i], cli.StorageLong[i], cli.StorageShort[i], -1, cli.StorageUsage[i]) + } + for i := 10; i < 20; i++ { + f.StringVarF(&cli.StorageFlagString[i-10], cli.StorageLong[i], cli.StorageShort[i], "*", cli.StorageUsage[i]) + } + for i := 20; i < 30; i++ { + f.BoolVarF(&cli.StorageFlagBool[i-20], cli.StorageLong[i], cli.StorageShort[i], false, cli.StorageUsage[i]) + } + + return cli +} + +func TestSettingsFromFlags(t *testing.T) { + f := NewFlag("Test Flag") + paras := []int{1, 5, 10} // generate the random string, the length are 1, 5, 10 + varSize := InitialVar(paras) + c := CLIArgs{} + cli := c.DefineFlagsArray(30, paras, varSize) + cli = c.InitialFlags(f) + + var num [10]string + var str [10]string + var bo [10]string + input := 
make([]string, 0, 60) + inputFlags := make([]string, 0, 30) + inputFlagsValue := make([]string, 0, 30) + + // generate the commandline input + for i := 0; i < 10; i++ { + num[i] = strconv.Itoa(rand.Intn(100)) + str[i] = RandStr(rand.Intn(5) + 1) + bo[i] = strconv.FormatBool(true) + } + for i := 0; i < 30; i++ { + if i < 10 { + marked := "-" + cli.StorageShort[i] + input = append(input, marked) + inputFlags = append(inputFlags, marked) + input = append(input, num[i]) + inputFlagsValue = append(inputFlagsValue, num[i]) + } + if i >= 10 && i < 20 { + marked := "-" + cli.StorageShort[i] + input = append(input, marked) + inputFlags = append(inputFlags, marked) + input = append(input, str[i-10]) + inputFlagsValue = append(inputFlagsValue, str[i-10]) + + } + if i >= 20 && i < 30 { + inputFlags = append(inputFlags, "-"+cli.StorageShort[i]) + marked := "-" + cli.StorageShort[i] + "=" + bo[i-20] + input = append(input, marked) + inputFlagsValue = append(inputFlagsValue, bo[i-20]) + } + } + + // test the default value + SetUpInputs([]string{""}, f) + + for i := 0; i < 30; i++ { + if i < 10 { + assert.Equal(t, -1, cli.StorageFlagInt[i], "Failed to get the default value") + } + if i >= 10 && i < 20 { + assert.Equal(t, "*", cli.StorageFlagString[i-10], "Failed to get the default value") + } + if i >= 20 && i < 30 { + assert.Equal(t, false, cli.StorageFlagBool[i-20], "Failed to get the default value") + } + } + + // test commandline parse value + SetUpInputs(input, f) + + for i := 0; i < 30; i++ { + if i < 10 { + assert.Equal(t, inputFlagsValue[i], strconv.Itoa(cli.StorageFlagInt[i]), "Failed to parse the value") + } + if i >= 10 && i < 20 { + assert.Equal(t, inputFlagsValue[i], cli.StorageFlagString[i-10], "Failed to parse the value") + } + if i >= 20 && i < 30 { + assert.Equal(t, inputFlagsValue[i], strconv.FormatBool(cli.StorageFlagBool[i-20]), "Failed to parse the value") + } + } + + // test flag usage + for i := 0; i < 30; i++ { + assert.Equal(t, cli.StorageUsage[i], f.fs.Lookup(cli.StorageShort[i]).Usage, "Failed to give the usage of the flag") + } + + // test the display of usage + s := f.Format() + for i := 0; i < 30; i++ { + assert.Equal(t, f.cliStrings[i], s[i+1], "Failed to match the format") + } +} diff --git a/daemon/conn/conn.go b/daemon/conn/conn.go new file mode 100644 index 0000000..ac7784e --- /dev/null +++ b/daemon/conn/conn.go @@ -0,0 +1,127 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package conn + +import ( + "crypto/tls" + "net/http" + "net/url" + "os" + "time" + "github.com/aws/aws-xray-daemon/daemon/cfg" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" + log "github.com/cihub/seelog" + "golang.org/x/net/http2" +) + +// getNewHTTPClient returns new HTTP client instance with provided configuration. 
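// Proxy resolution is delegated to getProxyAddress below: an explicit proxy address from the
// config file or CLI takes precedence over the HTTPS_PROXY environment variable, and an empty
// result means a direct connection. A rough behavioural sketch (addresses are examples only):
//
//	getProxyAddress("https://10.0.0.1:3128") // -> "https://10.0.0.1:3128", even if HTTPS_PROXY is set
//	getProxyAddress("")                      // -> $HTTPS_PROXY if set, otherwise ""
//
// The returned client also has HTTP/2 enabled on its transport and honours NoVerifySSL via
// InsecureSkipVerify.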
+func getNewHTTPClient(maxIdle int, requestTimeout int, noVerify bool, proxyAddress string) *http.Client { + tls := &tls.Config{ + InsecureSkipVerify: noVerify, + } + + finalProxyAddress := getProxyAddress(proxyAddress) + proxyURL := getProxyURL(finalProxyAddress) + transport := &http.Transport{ + MaxIdleConnsPerHost: maxIdle, + TLSClientConfig: tls, + Proxy: http.ProxyURL(proxyURL), + } + + // is not enabled by default as we configure TLSClientConfig for supporting SSL to data plane. + // http2.ConfigureTransport will setup transport layer to use HTTP2 + http2.ConfigureTransport(transport) + http := &http.Client{ + Transport: transport, + Timeout: time.Second * time.Duration(requestTimeout), + } + return http +} + +func getProxyAddress(proxyAddress string) string { + var finalProxyAddress string + if proxyAddress != "" { + finalProxyAddress = proxyAddress + } else if proxyAddress == "" && os.Getenv("HTTPS_PROXY") != "" { + finalProxyAddress = os.Getenv("HTTPS_PROXY") + } else { + finalProxyAddress = "" + } + return finalProxyAddress +} + +func getProxyURL(finalProxyAddress string) *url.URL { + var proxyURL *url.URL + var err error + if finalProxyAddress != "" { + proxyURL, err = url.Parse(finalProxyAddress) + if err != nil { + log.Errorf("Bad proxy URL: %v", err) + os.Exit(1) + } + } else { + proxyURL = nil + } + return proxyURL +} + +// GetAWSConfigSession returns AWS config and session instances. +func GetAWSConfigSession(c *cfg.Config, roleArn string, region string, noMetadata bool) (*aws.Config, *session.Session) { + http := getNewHTTPClient(cfg.ParameterConfigValue.Processor.MaxIdleConnPerHost, cfg.ParameterConfigValue.Processor.RequestTimeout, c.NoVerifySSL, c.ProxyAddress) + var s *session.Session + s = newAWSSession(s, roleArn) + var awsRegion string + regionEnv := os.Getenv("AWS_REGION") + if region == "" && regionEnv != "" { + awsRegion = regionEnv + log.Debugf("Fetch region %v from environment variables", awsRegion) + } else if region != "" { + awsRegion = region + log.Debugf("Fetch region %v from commandline argument", awsRegion) + } else if noMetadata != true { + s, _ = session.NewSession() + region, err := ec2metadata.New(s).Region() + log.Debugf("Fetch region %v from ec2 metadata", region) + if err != nil { + log.Errorf("Unable to retrieve the region from the EC2 instance %v\n", err) + } + awsRegion = region + } + if awsRegion == "" { + log.Error("Cannot fetch region variable from config file, environment variables and ec2 metadata.") + os.Exit(1) + } + config := &aws.Config{ + Region: aws.String(awsRegion), + DisableParamValidation: aws.Bool(true), + MaxRetries: aws.Int(2), + Endpoint: aws.String(c.Endpoint), + HTTPClient: http, + } + return config, s +} + +func newAWSSession(s *session.Session, roleArn string) *session.Session { + if roleArn == "" { + s, _ = session.NewSession() + } else { + t, _ := session.NewSession() + sts := stscreds.NewCredentialsWithClient(sts.New(t), roleArn) + s, _ = session.NewSession(&aws.Config{ + Credentials: sts, + }) + } + return s +} diff --git a/daemon/conn/conn_test.go b/daemon/conn/conn_test.go new file mode 100644 index 0000000..41595fc --- /dev/null +++ b/daemon/conn/conn_test.go @@ -0,0 +1,127 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package conn + +import ( + "os" + "os/exec" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/stretchr/testify/assert" +) + +func TestLoadEnvConfigCreds(t *testing.T) { + env := stashEnv() + defer popEnv(env) + + cases := struct { + Env map[string]string + Val credentials.Value + }{ + Env: map[string]string{ + "AWS_ACCESS_KEY": "AKID", + "AWS_SECRET_KEY": "SECRET", + "AWS_SESSION_TOKEN": "TOKEN", + }, + Val: credentials.Value{ + AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "TOKEN", + ProviderName: "EnvConfigCredentials", + }, + } + + for k, v := range cases.Env { + os.Setenv(k, v) + } + var s *session.Session + cfg := newAWSSession(s, "") + value, err := cfg.Config.Credentials.Get() + + assert.Nil(t, err, "Expect no error") + assert.Equal(t, cases.Val, value, "Expect the credentials value to match") + + cfgA := newAWSSession(s, "ROLEARN") + valueA, _ := cfgA.Config.Credentials.Get() + + assert.Equal(t, "", valueA.AccessKeyID, "Expect the value to be empty") + assert.Equal(t, "", valueA.SecretAccessKey, "Expect the value to be empty") + assert.Equal(t, "", valueA.SessionToken, "Expect the value to be empty") + assert.Equal(t, "", valueA.ProviderName, "Expect the value to be empty") +} + +func TestGetProxyUrlProxyAddressNotValid(t *testing.T) { + errorAddress := [3]string{"http://[%10::1]", "http://%41:8080/", "http://a b.com/"} + for _, address := range errorAddress { + // Only run the failing part when a specific env variable is set + if os.Getenv("Test_PROXY_URL") == "1" { + getProxyURL(address) + return + } + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestGetProxyUrlProxyAddressNotValid") + cmd.Env = append(os.Environ(), "Test_PROXY_URL=1") + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + // Check that the program exited + err := cmd.Wait() + if e, ok := err.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + } +} + +func TestGetProxyAddressFromEnvVariable(t *testing.T) { + env := stashEnv() + defer popEnv(env) + os.Setenv("HTTPS_PROXY", "https://127.0.0.1:8888") + + assert.Equal(t, os.Getenv("HTTPS_PROXY"), getProxyAddress(""), "Expect function return value should be same with Environment value") +} + +func TestGetProxyAddressFromConfigFile(t *testing.T) { + env := stashEnv() + defer popEnv(env) + + assert.Equal(t, "https://127.0.0.1:8888", getProxyAddress("https://127.0.0.1:8888"), "Expect function return value should be same with input value") +} + +func TestGetProxyAddressWhenNotExist(t *testing.T) { + env := stashEnv() + defer popEnv(env) + + assert.Equal(t, "", getProxyAddress(""), "Expect function return value to be empty") +} + +func TestGetProxyAddressPriority(t *testing.T) { + env := stashEnv() + defer popEnv(env) + os.Setenv("HTTPS_PROXY", "https://127.0.0.1:8888") + + assert.Equal(t, "https://127.0.0.1:9999", getProxyAddress("https://127.0.0.1:9999"), "Expect function return value to be same with input") +} + +func stashEnv() []string { + env := os.Environ() + os.Clearenv() + + return env +} + +func popEnv(env 
[]string) { + os.Clearenv() + + for _, e := range env { + p := strings.SplitN(e, "=", 2) + os.Setenv(p[0], p[1]) + } +} diff --git a/daemon/conn/xray_client.go b/daemon/conn/xray_client.go new file mode 100644 index 0000000..855e461 --- /dev/null +++ b/daemon/conn/xray_client.go @@ -0,0 +1,82 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package conn + +import ( + "os" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/xray" + log "github.com/cihub/seelog" +) + +// Version number of the X-Ray daemon. +var versionNumber = "2.1.0" + +// XRay defines X-Ray api call structure. +type XRay interface { + PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) + PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) +} + +// XRayClient represents X-Ray client. +type XRayClient struct { + xRay *xray.XRay +} + +// GetVersionNumber returns version number of X-Ray daemon. +func GetVersionNumber() string { + return versionNumber +} + +// PutTraceSegments makes PutTraceSegments api call on X-Ray client. +func (c XRayClient) PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) { + return c.xRay.PutTraceSegments(input) +} + +// PutTelemetryRecords makes PutTelemetryRecords api call on X-Ray client. +func (c XRayClient) PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) { + return c.xRay.PutTelemetryRecords(input) +} + +// NewXRay creates a new instance of the XRay client with a aws configuration and session . +func NewXRay(awsConfig *aws.Config, s *session.Session) XRay { + return requestXray(awsConfig, s) +} + +// IsTimeoutError checks whether error is timeout error. +func IsTimeoutError(err error) bool { + awsError, ok := err.(awserr.Error) + if ok { + if strings.Contains(awsError.Error(), "net/http: request canceled") { + return true + } + } + return false +} + +func requestXray(awsConfig *aws.Config, s *session.Session) XRay { + x := xray.New(s, awsConfig) + log.Debugf("Using Endpoint: %s", x.Endpoint) + var XRayVersionUserAgentHandler = request.NamedHandler{ + Name: "tracing.XRayVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler("xray", GetVersionNumber(), os.Getenv("AWS_EXECUTION_ENV")), + } + x.Handlers.Build.PushBackNamed(XRayVersionUserAgentHandler) + + xRay := XRayClient{ + xRay: x, + } + return xRay +} diff --git a/daemon/daemon.go b/daemon/daemon.go new file mode 100644 index 0000000..9c94d54 --- /dev/null +++ b/daemon/daemon.go @@ -0,0 +1,336 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package main + +import ( + "encoding/json" + "fmt" + "io" + "math" + "net" + "os" + "runtime/pprof" + "sync/atomic" + "time" + "github.com/aws/aws-xray-daemon/daemon/bufferpool" + "github.com/aws/aws-xray-daemon/daemon/cfg" + "github.com/aws/aws-xray-daemon/daemon/cli" + "github.com/aws/aws-xray-daemon/daemon/conn" + "github.com/aws/aws-xray-daemon/daemon/logger" + "github.com/aws/aws-xray-daemon/daemon/processor" + "github.com/aws/aws-xray-daemon/daemon/profiler" + "github.com/aws/aws-xray-daemon/daemon/ringbuffer" + "github.com/aws/aws-xray-daemon/daemon/socketconn" + "github.com/aws/aws-xray-daemon/daemon/socketconn/udp" + "github.com/aws/aws-xray-daemon/daemon/telemetry" + "github.com/aws/aws-xray-daemon/daemon/tracesegment" + "github.com/aws/aws-xray-daemon/daemon/util" + + "github.com/aws/aws-sdk-go/aws" + log "github.com/cihub/seelog" + "github.com/shirou/gopsutil/mem" +) + +var receiverCount int +var processorCount int +var config *cfg.Config + +const protocolSeparator = "\n" + +// Log Rotation Size is 50 MB +const logRotationSize int64 = 50 * 1024 * 1024 + +var udpAddress string +var stdFlag int +var socketConnection string +var cpuProfile string +var memProfile string +var roleArn string +var receiveBufferSize int +var daemonProcessBufferMemoryMB int +var logFile string +var configFilePath string +var resourceARN string +var noMetadata bool +var version bool +var logLevel string +var regionFlag string + +// Daemon reads trace segments from X-Ray daemon address and +// send to X-Ray service. +type Daemon struct { + // Boolean channel, set to true if error is received reading from Socket. + done chan bool + + // Ring buffer, used to stored segments received. + std *ringbuffer.RingBuffer + + // Counter for segments read by daemon. + count uint64 + + // Instance of socket connection. + sock socketconn.SocketConn + + // Reference to buffer pool. + pool *bufferpool.BufferPool + + // Reference to Processor. 
+ processor *processor.Processor +} + +func init() { + f, c := initCli("") + f.ParseFlags() + cfg.LogFile = logFile // storing log file passed through command line + // if config file is passed using command line argument parse flags again with default equal to config file + if configFilePath != "" { + cfg.ConfigValidation(configFilePath) + f, c = initCli(configFilePath) + f.ParseFlags() + } + if version { + fmt.Printf("AWS X-Ray daemon version: %v\n", conn.GetVersionNumber()) + os.Exit(0) + } + config = c +} + +func initCli(configFile string) (*cli.Flag, *cfg.Config) { + flag := cli.NewFlag("X-Ray Daemon") + cnfg := cfg.LoadConfig(configFile) + processorCount = cnfg.Concurrency + var ( + defaultDaemonProcessSpaceLimitMB = cnfg.TotalBufferSizeMB + defaultLogPath = cnfg.Logging.LogPath + defaultLogLevel = cnfg.Logging.LogLevel + defaultUDPAddress = cnfg.Socket.UDPAddress + defaultRoleARN = cnfg.RoleARN + defaultLocalMode = cnfg.LocalMode + defaultRegion = cnfg.Region + defaultResourceARN = cnfg.ResourceARN + ) + socketConnection = "UDP" + regionFlag = defaultRegion + flag.StringVarF(&resourceARN, "resource-arn", "a", defaultResourceARN, "Amazon Resource Name (ARN) of the AWS resource running the daemon.") + flag.BoolVarF(&noMetadata, "local-mode", "o", defaultLocalMode, "Don't check for EC2 instance metadata.") + flag.IntVarF(&daemonProcessBufferMemoryMB, "buffer-memory", "m", defaultDaemonProcessSpaceLimitMB, "Change the amount of memory in MB that buffers can use (minimum 3).") + flag.StringVarF(®ionFlag, "region", "n", defaultRegion, "Send segments to X-Ray service in a specific region.") + flag.StringVarF(&udpAddress, "bind", "b", defaultUDPAddress, "Overrides default UDP address (127.0.0.1:2000).") + flag.StringVarF(&roleArn, "role-arn", "r", defaultRoleARN, "Assume the specified IAM role to upload segments to a different account.") + flag.StringVarF(&configFilePath, "config", "c", "", "Load a configuration file from the specified path.") + flag.StringVarF(&logFile, "log-file", "f", defaultLogPath, "Output logs to the specified file path.") + flag.StringVarF(&logLevel, "log-level", "l", defaultLogLevel, "Log level, from most verbose to least: dev, debug, info, warn, error, prod (default).") + flag.BoolVarF(&version, "version", "v", false, "Show AWS X-Ray daemon version.") + return flag, cnfg +} + +func initDaemon(config *cfg.Config) *Daemon { + if logFile != "" { + var fileWriter io.Writer + if config.Logging.LogRotation { + // Empty Archive path as code does not archive logs + apath := "" + maxSize := logRotationSize + // Keep one rolled over log file around + maxRolls := 1 + archiveExplode := false + fileWriter, _ = log.NewRollingFileWriterSize(logFile, 0, apath, maxSize, maxRolls, 0, archiveExplode) + } else { + fileWriter, _ = log.NewFileWriter(logFile) + } + logger.LoadLogConfig(fileWriter, config, logLevel) + } else { + newWriter, _ := log.NewConsoleWriter() + logger.LoadLogConfig(newWriter, config, logLevel) + } + defer log.Flush() + + log.Infof("Initializing AWS X-Ray daemon %v", conn.GetVersionNumber()) + + parameterConfig := cfg.ParameterConfigValue + receiverCount = parameterConfig.ReceiverRoutines + stdFlag = parameterConfig.SegmentChannel.Std + receiveBufferSize = parameterConfig.Socket.BufferSizeKB * 1024 + cpuProfile = os.Getenv("XRAY_DAEMON_CPU_PROFILE") + memProfile = os.Getenv("XRAY_DAEMON_MEMORY_PROFILE") + + profiler.EnableCPUProfile(&cpuProfile) + defer pprof.StopCPUProfile() + + var sock socketconn.SocketConn + + sock = udp.New(receiveBufferSize, udpAddress) + + 
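	// Rough sizing sketch (illustrative numbers, using the default 64 KB receive buffer from
	// ParameterConfigValue): with --buffer-memory 0 on a 16 GB host, evaluateBufferMemory
	// takes roughly 1% of RAM, i.e. 163 MB, and GetPoolBufferCount then allocates
	// floor(163*1024*1024 / 65536) = 2608 pooled segment buffers.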
memoryLimit := evaluateBufferMemory(daemonProcessBufferMemoryMB) + log.Infof("Using buffer memory limit of %v MB", memoryLimit) + bufferLimit, err := bufferpool.GetPoolBufferCount(memoryLimit, receiveBufferSize) + if err != nil { + log.Errorf("%v", err) + os.Exit(1) + } + log.Infof("%v segment buffers allocated", bufferLimit) + bufferPool := bufferpool.Init(bufferLimit, receiveBufferSize) + std := ringbuffer.New(stdFlag, bufferPool) + if config.Endpoint != "" { + log.Debugf("Using Endpoint read from Config file: %s", config.Endpoint) + } + awsConfig, session := conn.GetAWSConfigSession(config, roleArn, regionFlag, noMetadata) + log.Infof("Using region: %v", aws.StringValue(awsConfig.Region)) + + log.Debugf("ARN of the AWS resource running the daemon: %v", resourceARN) + telemetry.Init(awsConfig, session, resourceARN, noMetadata) + + // If calculated number of buffer is lower than our default, use calculated one. Otherwise, use default value. + parameterConfig.Processor.BatchSize = util.GetMinIntValue(parameterConfig.Processor.BatchSize, bufferLimit) + + daemon := &Daemon{ + done: make(chan bool), + std: std, + pool: bufferPool, + count: 0, + sock: sock, + processor: processor.New(awsConfig, session, processorCount, std, bufferPool, parameterConfig), + } + + return daemon +} + +func runDaemon(daemon *Daemon) { + for i := 0; i < receiverCount; i++ { + go daemon.poll() + } +} + +func (d *Daemon) close() { + for i := 0; i < receiverCount; i++ { + <-d.done + } + // Signal routines to finish + // This will push telemetry and customer segments in parallel + d.std.Close() + telemetry.T.Quit <- true + + <-d.processor.Done + <-telemetry.T.Done + + profiler.MemSnapShot(&memProfile) + log.Debugf("Trace segment: received: %d, truncated: %d, processed: %d", atomic.LoadUint64(&d.count), d.std.TruncatedCount(), d.processor.ProcessedCount()) + log.Debugf("Shutdown finished. Current epoch in nanoseconds: %v", time.Now().UnixNano()) +} + +func (d *Daemon) stop() { + d.sock.Close() +} + +// Returns number of bytes read from socket connection. +func (d *Daemon) read(buf *[]byte) int { + bufVal := *buf + rlen, err := d.sock.Read(bufVal) + switch err := err.(type) { + case net.Error: + if !err.Temporary() { + d.done <- true + return -1 + } + log.Errorf("daemon: net: err: %v", err) + return 0 + case error: + log.Errorf("daemon: socket: err: %v", err) + return 0 + } + return rlen +} + +func (d *Daemon) poll() { + separator := []byte(protocolSeparator) + fallBackBuffer := make([]byte, receiveBufferSize) + splitBuf := make([][]byte, 2) + + for { + bufPointer := d.pool.Get() + fallbackPointerUsed := false + if bufPointer == nil { + log.Debug("Pool does not have any buffer.") + bufPointer = &fallBackBuffer + fallbackPointerUsed = true + } + rlen := d.read(bufPointer) + if rlen > 0 { + telemetry.T.SegmentReceived(1) + } + if rlen == 0 { + if !fallbackPointerUsed { + d.pool.Return(bufPointer) + } + continue + } + if fallbackPointerUsed { + log.Warn("Segment dropped. 
Consider increasing memory limit") + telemetry.T.SegmentSpillover(1) + continue + } else if rlen == -1 { + return + } + + buf := *bufPointer + bufMessage := buf[0:rlen] + + slices := util.SplitHeaderBody(&bufMessage, &separator, &splitBuf) + if len(slices[1]) == 0 { + log.Warnf("Missing header or segment: %s", string(slices[0])) + d.pool.Return(bufPointer) + telemetry.T.SegmentRejected(1) + continue + } + + header := slices[0] + payload := slices[1] + headerInfo := tracesegment.Header{} + json.Unmarshal(header, &headerInfo) + + switch headerInfo.IsValid() { + case true: + default: + log.Warnf("Invalid header: %s", string(header)) + d.pool.Return(bufPointer) + telemetry.T.SegmentRejected(1) + continue + } + + ts := &tracesegment.TraceSegment{ + Raw: &payload, + PoolBuf: bufPointer, + } + + atomic.AddUint64(&d.count, 1) + d.std.Send(ts) + } +} + +func evaluateBufferMemory(cliBufferMemory int) int { + var bufferMemoryMB int + if cliBufferMemory > 0 { + bufferMemoryMB = cliBufferMemory + } else { + vm, err := mem.VirtualMemory() + if err != nil { + log.Errorf("%v", err) + os.Exit(1) + } + bufferMemoryLimitPercentageOfTotal := 0.01 + totalBytes := vm.Total + bufferMemoryMB = int(math.Floor(bufferMemoryLimitPercentageOfTotal * float64(totalBytes) / float64(1024*1024))) + } + if bufferMemoryMB < 3 { + log.Error("Not enough Buffers Memory Allocated. Min Buffers Memory required: 3 MB.") + os.Exit(1) + } + return bufferMemoryMB +} diff --git a/daemon/logger/log_config.go b/daemon/logger/log_config.go new file mode 100644 index 0000000..da70175 --- /dev/null +++ b/daemon/logger/log_config.go @@ -0,0 +1,57 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package logger + +import ( + "io" + "github.com/aws/aws-xray-daemon/daemon/cfg" + + log "github.com/cihub/seelog" +) + +// LoadLogConfig configures Logger. +func LoadLogConfig(writer io.Writer, c *cfg.Config, loglevel string) { + var level log.LogLevel + + switch c.Logging.LogLevel { + case "dev": + level = log.TraceLvl + case "debug": + level = log.DebugLvl + case "info": + level = log.InfoLvl + case "warn": + level = log.WarnLvl + case "error": + level = log.ErrorLvl + case "prod": + level = log.InfoLvl + } + + if loglevel != c.Logging.LogLevel { + switch loglevel { + case "dev": + level = log.TraceLvl + case "debug": + level = log.DebugLvl + case "info": + level = log.InfoLvl + case "warn": + level = log.WarnLvl + case "error": + level = log.ErrorLvl + case "prod": + level = log.InfoLvl + } + } + + logger, _ := log.LoggerFromWriterWithMinLevelAndFormat(writer, level, cfg.LogFormat) + log.ReplaceLogger(logger) +} diff --git a/daemon/logger/logger_test.go b/daemon/logger/logger_test.go new file mode 100644 index 0000000..fd2a74d --- /dev/null +++ b/daemon/logger/logger_test.go @@ -0,0 +1,147 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package logger + +import ( + "bytes" + "fmt" + "testing" + "time" + "github.com/aws/aws-xray-daemon/daemon/cfg" + + "github.com/cihub/seelog" + "github.com/stretchr/testify/assert" +) + +type TestCase struct { + Level seelog.LogLevel + Message string + Params []interface{} + Output string +} + +func generateTestCase(t *testing.T, level seelog.LogLevel, formatID string, message string, params ...interface{}) TestCase { + testCase := TestCase{ + Level: level, + Message: message, + Params: params, + } + var levelStr string + switch level { + case seelog.ErrorLvl: + levelStr = "Error" + + case seelog.InfoLvl: + levelStr = "Info" + + case seelog.DebugLvl: + levelStr = "Debug" + + case seelog.WarnLvl: + levelStr = "Warn" + + case seelog.TraceLvl: + levelStr = "Trace" + + case seelog.CriticalLvl: + levelStr = "Critical" + + default: + assert.Fail(t, "Unexpected log level", level) + } + + msg := fmt.Sprintf(testCase.Message, testCase.Params...) + testCase.Output = fmt.Sprintf("%s [%v] %v\n", time.Now().Format(formatID), levelStr, msg) + return testCase +} + +func TestLogger(t *testing.T) { + var testCases []TestCase + + formatID := "2006-01-02T15:04:05Z07:00" + for _, logLevel := range []seelog.LogLevel{seelog.DebugLvl, seelog.InfoLvl, seelog.ErrorLvl, seelog.WarnLvl, seelog.TraceLvl, seelog.CriticalLvl} { + testCases = append(testCases, generateTestCase(t, logLevel, formatID, "(some message without parameters)")) + testCases = append(testCases, generateTestCase(t, logLevel, formatID, "(some message with %v as param)", []interface{}{"|a param|"})) + } + + for _, testCase := range testCases { + testLogger(t, testCase) + } +} + +func testLogger(t *testing.T, testCase TestCase) { + // create seelog logger that outputs to buffer + var out bytes.Buffer + config := &cfg.Config{ + Logging: struct { + LogRotation bool `yaml:"LogRotation"` + LogLevel string `yaml:"LogLevel"` + LogPath string `yaml:"LogPath"` + }{ + LogRotation: true, + LogLevel: "dev", + LogPath: "/var/tmp/xray.log", + }, + } + // call loadlogconfig method under test + loglevel := "dev" + LoadLogConfig(&out, config, loglevel) + // exercise logger + switch testCase.Level { + case seelog.ErrorLvl: + if len(testCase.Params) > 0 { + seelog.Errorf(testCase.Message, testCase.Params...) + } else { + seelog.Error(testCase.Message) + } + + case seelog.InfoLvl: + if len(testCase.Params) > 0 { + seelog.Infof(testCase.Message, testCase.Params...) + } else { + seelog.Info(testCase.Message) + } + + case seelog.DebugLvl: + if len(testCase.Params) > 0 { + seelog.Debugf(testCase.Message, testCase.Params...) + } else { + seelog.Debug(testCase.Message) + } + + case seelog.WarnLvl: + if len(testCase.Params) > 0 { + seelog.Warnf(testCase.Message, testCase.Params...) + } else { + seelog.Warn(testCase.Message) + } + + case seelog.TraceLvl: + if len(testCase.Params) > 0 { + seelog.Tracef(testCase.Message, testCase.Params...) + } else { + seelog.Trace(testCase.Message) + } + + case seelog.CriticalLvl: + if len(testCase.Params) > 0 { + seelog.Criticalf(testCase.Message, testCase.Params...) 
+ } else { + seelog.Critical(testCase.Message) + } + + default: + assert.Fail(t, "Unexpected log level", testCase.Level) + } + seelog.Flush() + + // check result + assert.Equal(t, testCase.Output, out.String()) +} diff --git a/daemon/processor/batchprocessor.go b/daemon/processor/batchprocessor.go new file mode 100644 index 0000000..2ebc2f2 --- /dev/null +++ b/daemon/processor/batchprocessor.go @@ -0,0 +1,143 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package processor + +import ( + "math" + "math/rand" + "time" + "github.com/aws/aws-xray-daemon/daemon/conn" + "github.com/aws/aws-xray-daemon/daemon/telemetry" + "github.com/aws/aws-xray-daemon/daemon/util/timer" + + "github.com/aws/aws-sdk-go/service/xray" + log "github.com/cihub/seelog" +) + +const ( + backoffCapSeconds = 30 + backoffMinAttempts = 10 + backoffBaseSeconds = 1 +) + +// Structure for trace segments batch. +type segmentsBatch struct { + // Boolean channel set to true when processing the batch segments is done. + done chan bool + + // String slice of trace segments. + batches chan []*string + + // Instance of XRay, used to send data to X-Ray service. + xRay conn.XRay + + // Random generator, used for back off logic in case of exceptions. + randGen *rand.Rand + + // Instance of timer. + timer timer.Timer +} + +func (s *segmentsBatch) send(batch []*string) { + select { + case s.batches <- batch: + + default: + select { + case batchTruncated := <-s.batches: + telemetry.T.SegmentSpillover(int64(len(batchTruncated))) + log.Warnf("Spilling over %v segments", len(batchTruncated)) + + default: + log.Debug("Segment batch: channel is de-queued") + } + log.Debug("Segment batch: retrying batch") + s.send(batch) + } +} + +func (s *segmentsBatch) poll() { + failedAttempt := 0 + for { + batch, ok := <-s.batches + if ok { + params := &xray.PutTraceSegmentsInput{ + TraceSegmentDocuments: batch, + } + start := time.Now() + // send segment to X-Ray service. 
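+			// On failure the error is classified for telemetry, failedAttempt is incremented,
+			// and the next send is delayed by a full-jitter backoff (see backOff below).
+			// On success the failure counter resets, and any unprocessed segments reported
+			// by the service are counted as rejected and logged.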
+ r, err := s.xRay.PutTraceSegments(params) + if err != nil { + telemetry.EvaluateConnectionError(err) + failedAttempt++ + backOffSeconds := s.backOff(failedAttempt) + log.Errorf("Sending segment batch failed with: %v", err) + log.Warnf("Delaying sending of additional batches by %v seconds", backOffSeconds) + if backOffSeconds > 0 { + <-s.timer.After(time.Second * time.Duration(backOffSeconds)) + } + continue + } else { + failedAttempt = 0 + telemetry.T.SegmentSent(int64(len(batch))) + } + elapsed := time.Since(start) + log.Infof("Successfully sent batch of %d segments (%1.3f seconds)", len(batch), elapsed.Seconds()) + for _, unprocessedSegment := range r.UnprocessedTraceSegments { + telemetry.T.SegmentRejected(1) + log.Errorf("Unprocessed segment: %v", unprocessedSegment) + log.Warn("Batch that contains unprocessed segments") + for i := 0; i < len(batch); i++ { + log.Warn(*batch[i]) + } + } + } else { + log.Debug("Segment batch: done!") + s.done <- true + break + } + } +} + +func (s *segmentsBatch) close() { + close(s.batches) +} + +func min(x, y int32) int32 { + if x < y { + return x + } + return y +} + +// Returns int32 number for Full Jitter Base +// If the computation result in value greater than Max Int31 it returns MAX Int31 value +func getValidJitterBase(backoffBase, attempt int) int32 { + base := float64(backoffBase) * math.Pow(2, float64(attempt)) + var baseInt int32 + if base > float64(math.MaxInt32/2) { + baseInt = math.MaxInt32 / 2 + } else { + baseInt = int32(base) + } + return baseInt +} + +func (s *segmentsBatch) backOff(attempt int) int32 { + if attempt <= backoffMinAttempts { + return 0 + } + // Attempts to be considered for Jitter Backoff + backoffAttempts := attempt - backoffMinAttempts + // As per Full Jitter described in https://www.awsarchitectureblog.com/2015/03/backoff.html + base := getValidJitterBase(backoffBaseSeconds, backoffAttempts) + randomBackoff := s.randGen.Int31n(base) + return min(backoffCapSeconds, randomBackoff) +} diff --git a/daemon/processor/batchprocessor_test.go b/daemon/processor/batchprocessor_test.go new file mode 100644 index 0000000..4123a94 --- /dev/null +++ b/daemon/processor/batchprocessor_test.go @@ -0,0 +1,388 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package processor + +import ( + "errors" + "fmt" + "math/rand" + "strings" + "testing" + "time" + "github.com/aws/aws-xray-daemon/daemon/util/test" + + "github.com/aws/aws-sdk-go/service/xray" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var doneMsg = "Segment batch: done!" 
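Editorial aside (not part of the patch): backOff and getValidJitterBase above implement the "full jitter" strategy from the linked AWS post. A minimal standalone sketch, reusing the same constants, shows how the delay stays zero for the first ten failures and is then drawn uniformly from an exponentially growing window capped at 30 seconds:

package main

import (
    "fmt"
    "math"
    "math/rand"
)

const (
    backoffCapSeconds  = 30
    backoffMinAttempts = 10
    backoffBaseSeconds = 1
)

// jitterBase mirrors getValidJitterBase: base * 2^attempt, capped at MaxInt32/2.
func jitterBase(base, attempt int) int32 {
    v := float64(base) * math.Pow(2, float64(attempt))
    if v > float64(math.MaxInt32/2) {
        return math.MaxInt32 / 2
    }
    return int32(v)
}

func main() {
    r := rand.New(rand.NewSource(1))
    for attempt := 1; attempt <= 16; attempt++ {
        delay := int32(0)
        if attempt > backoffMinAttempts {
            // Full jitter: pick a random delay in [0, base*2^n) and cap it at 30s.
            d := r.Int31n(jitterBase(backoffBaseSeconds, attempt-backoffMinAttempts))
            if d > backoffCapSeconds {
                d = backoffCapSeconds
            }
            delay = d
        }
        fmt.Printf("attempt %2d -> backoff %2ds\n", attempt, delay)
    }
}

Because the delay is re-drawn on every failure, several daemons that fail at the same time back off to different values instead of retrying in lockstep.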
+ +type MockXRayClient struct { + mock.Mock + CallNoToPutTraceSegments int + input *xray.PutTraceSegmentsInput +} + +func (c *MockXRayClient) PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) { + c.input = input + c.CallNoToPutTraceSegments++ + args := c.Called(nil) + errorStr := args.String(0) + var err error + output := &xray.PutTraceSegmentsOutput{} + if errorStr == "Send unprocessed" { + segmentID := "Test-Segment-Id-1242113" + output.UnprocessedTraceSegments = append(output.UnprocessedTraceSegments, &xray.UnprocessedTraceSegment{Id: &segmentID}) + } else if errorStr != "" { + err = errors.New(errorStr) + } + return output, err +} + +func (c *MockXRayClient) PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) { + return nil, nil +} + +func TestSendOneBatch(t *testing.T) { + s := segmentsBatch{ + batches: make(chan []*string, 1), + } + testMessage := "Test Message" + batch := []*string{&testMessage} + + s.send(batch) + + returnedBatch := <-s.batches + assert.EqualValues(t, len(returnedBatch), 1) + + batchString := *returnedBatch[0] + assert.EqualValues(t, batchString, testMessage) +} + +func TestSendBatchChannelTruncate(t *testing.T) { + log := test.LogSetup() + s := segmentsBatch{ + batches: make(chan []*string, 1), + } + testMessage := "Test Message" + batch := []*string{&testMessage} + testMessage2 := "Test Message 2" + batch2 := []*string{&testMessage2} + + s.send(batch) + s.send(batch2) + + returnedBatch := <-s.batches + + assert.EqualValues(t, len(returnedBatch), 1) + assert.EqualValues(t, *returnedBatch[0], testMessage2) + assert.True(t, strings.Contains(log.Logs[0], "Spilling over")) + assert.True(t, strings.Contains(log.Logs[1], "retrying batch")) +} + +func TestPollSendSuccess(t *testing.T) { + log := test.LogSetup() + xRay := new(MockXRayClient) + xRay.On("PutTraceSegments", nil).Return("").Once() + s := segmentsBatch{ + batches: make(chan []*string, 1), + xRay: xRay, + done: make(chan bool), + } + testMessage := "Test Message" + batch := []*string{&testMessage} + s.send(batch) + + go s.poll() + close(s.batches) + <-s.done + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, 1) + assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Successfully sent batch of %v", 1))) + assert.True(t, strings.Contains(log.Logs[1], doneMsg)) +} + +func TestPoolSendFailedOnceMoreThanMin(t *testing.T) { + seed := int64(122321) + randGen := rand.New(rand.NewSource(seed)) + timer := test.MockTimerClient{} + log := test.LogSetup() + xRay := new(MockXRayClient) + xRay.On("PutTraceSegments", nil).Return("Error") + s := segmentsBatch{ + batches: make(chan []*string, 1), + xRay: xRay, + done: make(chan bool), + randGen: rand.New(rand.NewSource(seed)), + timer: &timer, + } + testMessage := "Test Message" + batch := []*string{&testMessage} + // First failure + backoff := randGen.Int31n(backoffBaseSeconds * 2) + + go s.poll() + for i := 0; i < backoffMinAttempts; i++ { + s.send(batch) + timer.IncrementDuration(time.Second) + time.Sleep(time.Millisecond) + } + s.send(batch) + close(s.batches) + + time.Sleep(time.Millisecond) + timer.IncrementDuration(time.Second * time.Duration(backoff)) + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, backoffMinAttempts+1) + // Backed off only once after min failed attempts are exhausted + assert.EqualValues(t, 1, timer.AfterCalledTimes()) + + <-s.done + + assert.True(t, strings.Contains(log.Logs[len(log.Logs)-1], doneMsg)) + timer.Dispose() +} + +func 
TestPoolSendFailedTwiceMoreThanMin(t *testing.T) { + seed := int64(122321) + randGen := rand.New(rand.NewSource(seed)) + timer := test.MockTimerClient{} + log := test.LogSetup() + xRay := new(MockXRayClient) + xRay.On("PutTraceSegments", nil).Return("Error") + s := segmentsBatch{ + batches: make(chan []*string, 1), + xRay: xRay, + done: make(chan bool), + randGen: rand.New(rand.NewSource(seed)), + timer: &timer, + } + testMessage := "Test Message" + batch := []*string{&testMessage} + // First failure + backoff := randGen.Int31n(backoffBaseSeconds * 2) + + go s.poll() + for i := 0; i < backoffMinAttempts; i++ { + s.send(batch) + timer.IncrementDuration(time.Second) + time.Sleep(time.Millisecond) + } + s.send(batch) + + time.Sleep(time.Millisecond) + timer.IncrementDuration(time.Second * time.Duration(backoff)) + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, backoffMinAttempts+1) + assert.EqualValues(t, 1, timer.AfterCalledTimes()) + + backoff2 := randGen.Int31n(backoffBaseSeconds * 4) + + s.send(batch) + + time.Sleep(time.Millisecond) + timer.IncrementDuration(time.Second * time.Duration(backoff2)) + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, backoffMinAttempts+2) + assert.EqualValues(t, 2, timer.AfterCalledTimes()) + + close(s.batches) + <-s.done + assert.True(t, strings.Contains(log.Logs[len(log.Logs)-1], doneMsg)) + timer.Dispose() +} + +func TestPoolSendFailedTwiceAndSucceedThird(t *testing.T) { + seed := int64(122321) + randGen := rand.New(rand.NewSource(seed)) + timer := test.MockTimerClient{} + log := test.LogSetup() + xRay := new(MockXRayClient) + xRay.On("PutTraceSegments", nil).Return("Error").Times(backoffMinAttempts + 2) + xRay.On("PutTraceSegments", nil).Return("").Once() + + s := segmentsBatch{ + batches: make(chan []*string, 1), + xRay: xRay, + done: make(chan bool), + randGen: rand.New(rand.NewSource(seed)), + timer: &timer, + } + testMessage := "Test Message" + batch := []*string{&testMessage} + + // First failure. + backoff := randGen.Int31n(backoffBaseSeconds * 2) + + go s.poll() + for i := 0; i < backoffMinAttempts; i++ { + s.send(batch) + timer.IncrementDuration(time.Second) + time.Sleep(time.Millisecond) + } + s.send(batch) + + time.Sleep(time.Millisecond) + timer.IncrementDuration(time.Second * time.Duration(backoff)) + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, backoffMinAttempts+1) + assert.EqualValues(t, 1, timer.AfterCalledTimes()) + + // Second failure. + backoff2 := randGen.Int31n(backoffBaseSeconds * 4) + + s.send(batch) + + time.Sleep(time.Millisecond) + timer.IncrementDuration(time.Second * time.Duration(backoff2)) + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, backoffMinAttempts+2) + assert.EqualValues(t, 2, timer.AfterCalledTimes()) + + // Third success. + s.send(batch) + + time.Sleep(time.Millisecond) + timer.IncrementDuration(time.Second) + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, backoffMinAttempts+3) + assert.EqualValues(t, 2, timer.AfterCalledTimes()) // no backoff logic triggered. 
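+	// The successful third attempt resets failedAttempt inside poll(), so no new
+	// backoff timer is started and AfterCalledTimes stays at 2.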
+ + close(s.batches) + <-s.done + + assert.True(t, strings.Contains(log.Logs[len(log.Logs)-2], fmt.Sprintf("Successfully sent batch of %v", 1))) + assert.True(t, strings.Contains(log.Logs[len(log.Logs)-1], doneMsg)) + timer.Dispose() +} + +func TestPutTraceSegmentsParameters(t *testing.T) { + log := test.LogSetup() + xRay := new(MockXRayClient) + xRay.On("PutTraceSegments", nil).Return("").Once() + + s := segmentsBatch{ + batches: make(chan []*string, 1), + xRay: xRay, + done: make(chan bool), + } + testMessage := "Test Message" + batch := []*string{&testMessage} + s.send(batch) + + go s.poll() + + close(s.batches) + <-s.done + actualInput := xRay.input + + expectedInput := &xray.PutTraceSegmentsInput{ + TraceSegmentDocuments: batch, + } + + assert.EqualValues(t, actualInput, expectedInput) + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, 1) + assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Successfully sent batch of %v", 1))) + assert.True(t, strings.Contains(log.Logs[1], doneMsg)) +} + +func TestPoolSendReturnUnprocessed(t *testing.T) { + log := test.LogSetup() + xRay := new(MockXRayClient) + xRay.On("PutTraceSegments", nil).Return("Send unprocessed").Once() + s := segmentsBatch{ + batches: make(chan []*string, 1), + xRay: xRay, + done: make(chan bool), + } + testMessage := "Test Message" + batch := []*string{&testMessage} + s.send(batch) + + go s.poll() + close(s.batches) + <-s.done + + assert.EqualValues(t, xRay.CallNoToPutTraceSegments, 1) + assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Successfully sent batch of %v", 1))) + assert.True(t, strings.Contains(log.Logs[1], "Unprocessed segment")) +} + +type minTestCase struct { + x int32 + y int32 + result int32 +} + +func TestMin(t *testing.T) { + testCases := []minTestCase{ + {x: 23, y: 54, result: 23}, + {x: 1121, y: 21, result: 21}, + {x: -12123, y: -4343, result: -12123}, + {x: 77, y: 77, result: 77}, + {x: 0, y: 0, result: 0}, + {x: 0, y: -54, result: -54}, + {x: -6543, y: 0, result: -6543}, + } + for _, c := range testCases { + r := min(c.x, c.y) + + assert.EqualValues(t, c.result, r, fmt.Sprintf("Min Test: X: %v, Y: %v, Expected: %v", c.x, c.y, c.result)) + } +} + +func TestGetValidJitterBase(t *testing.T) { + testCases := []struct { + backoffBase int + attempt int + expectedValue int32 + }{ + {backoffBase: 1, attempt: 1, expectedValue: 2}, + {backoffBase: 2, attempt: 2, expectedValue: 8}, + {backoffBase: 1, attempt: 25, expectedValue: 33554432}, + {backoffBase: 5, attempt: 30, expectedValue: 1073741823}, + {backoffBase: 1, attempt: 100, expectedValue: 1073741823}, + } + for _, tc := range testCases { + backoffBase := tc.backoffBase + attempt := tc.attempt + + base := getValidJitterBase(backoffBase, attempt) + + assert.EqualValues(t, tc.expectedValue, base) + } +} + +func TestBackoff(t *testing.T) { + failedAttempts := []int{1, 2, 5, 7, 10, 23, 100, 1000, 343212} + seedRandom := rand.New(rand.NewSource(time.Now().Unix())) + for _, fa := range failedAttempts { + seed := int64(seedRandom.Int63()) + randGen := rand.New(rand.NewSource(seed)) + s := segmentsBatch{ + randGen: rand.New(rand.NewSource(seed)), + } + + backoffSec := s.backOff(fa) + + var backoffExpected int32 + + if fa > backoffMinAttempts { + randomBackoff := randGen.Int31n(getValidJitterBase(backoffBaseSeconds, fa-backoffMinAttempts)) + backoffExpected = randomBackoff + } + + if backoffCapSeconds < backoffExpected { + backoffExpected = backoffCapSeconds + } + assert.EqualValues(t, backoffExpected, backoffSec, fmt.Sprintf("Test Case: Failed 
Attempt: %v, Rand Seed: %v", fa, seed)) + } +} diff --git a/daemon/processor/processor.go b/daemon/processor/processor.go new file mode 100644 index 0000000..eb421ee --- /dev/null +++ b/daemon/processor/processor.go @@ -0,0 +1,192 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package processor + +import ( + "sync/atomic" + "time" + + log "github.com/cihub/seelog" + + "github.com/aws/aws-xray-daemon/daemon/bufferpool" + "github.com/aws/aws-xray-daemon/daemon/ringbuffer" + "github.com/aws/aws-xray-daemon/daemon/tracesegment" + + "math/rand" + "os" + "github.com/aws/aws-xray-daemon/daemon/cfg" + "github.com/aws/aws-xray-daemon/daemon/conn" + "github.com/aws/aws-xray-daemon/daemon/util/timer" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" +) + +// Processor buffers segments and send to X-Ray service. +type Processor struct { + // Boolean channel, set to true when processor has no segments in priority and standard ring buffer. + Done chan bool + + // Ring buffer to store trace segments. + std *ringbuffer.RingBuffer + + // Buffer pool instance. + pool *bufferpool.BufferPool + + // Counter for segments received. + count uint64 + + // timer client used for setting idle timer. + timerClient timer.Timer + + // segmentsBatch is used to process received segments batch. + traceSegmentsBatch *segmentsBatch + + // Number of go routines to spawn for traceSegmentsBatch.poll(). + batchProcessorCount int + + // Channel for Time. + idleTimer <-chan time.Time + + // Size of the batch segments processed by Processor. + batchSize int + + // Idle timeout in milliseconds used while sending batch segments. + sendIdleTimeout time.Duration +} + +// New creates new instance of Processor. 
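+// It wires up a segmentsBatch backed by an X-Ray client, starts
+// segmentBatchProcessorCount goroutines that drain the batch channel, and
+// starts a single Processor.poll goroutine that assembles batches from the
+// ring buffer.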
+func New(awsConfig *aws.Config, s *session.Session, segmentBatchProcessorCount int, std *ringbuffer.RingBuffer, + pool *bufferpool.BufferPool, c *cfg.ParameterConfig) *Processor { + batchesChan := make(chan []*string, c.Processor.BatchProcessorQueueSize) + segmentBatchDoneChan := make(chan bool) + tsb := &segmentsBatch{ + batches: batchesChan, + done: segmentBatchDoneChan, + randGen: rand.New(rand.NewSource(time.Now().UnixNano())), + timer: &timer.Client{}, + } + x := conn.NewXRay(awsConfig, s) + if x == nil { + log.Error("X-Ray client returned nil") + os.Exit(1) + } + tsb.xRay = x + doneChan := make(chan bool) + log.Debugf("Batch size: %v", c.Processor.BatchSize) + p := &Processor{ + Done: doneChan, + std: std, + pool: pool, + count: 0, + timerClient: &timer.Client{}, + batchProcessorCount: segmentBatchProcessorCount, + traceSegmentsBatch: tsb, + batchSize: c.Processor.BatchSize, + sendIdleTimeout: time.Millisecond * time.Duration(c.Processor.IdleTimeoutMillisecond), + } + + for i := 0; i < p.batchProcessorCount; i++ { + go p.traceSegmentsBatch.poll() + } + + go p.poll() + + return p +} + +func (p *Processor) poll() { + batch := make([]*tracesegment.TraceSegment, 0, p.batchSize) + p.SetIdleTimer() + + for { + select { + case segment, ok := <-p.std.Channel: + if ok { + batch = p.receiveTraceSegment(segment, batch) + } else { + p.std.Empty = true + } + case <-p.idleTimer: + if len(batch) > 0 { + log.Debug("processor: sending partial batch") + batch = p.sendBatchAsync(batch) + } else { + p.SetIdleTimer() + } + } + + if p.std.Empty { + break + } + } + + if len(batch) > 0 { + batch = p.sendBatchAsync(batch) + } + p.traceSegmentsBatch.close() + for i := 0; i < p.batchProcessorCount; i++ { + <-p.traceSegmentsBatch.done + } + log.Debug("processor: done!") + p.Done <- true +} + +func (p *Processor) receiveTraceSegment(ts *tracesegment.TraceSegment, batch []*tracesegment.TraceSegment) []*tracesegment.TraceSegment { + atomic.AddUint64(&p.count, 1) + batch = append(batch, ts) + + if len(batch) >= p.batchSize { + log.Debug("processor: sending complete batch") + batch = p.sendBatchAsync(batch) + } else if p.pool.CurrentBuffersLen() == 0 { + log.Debug("processor: sending partial batch due to load on buffer pool") + batch = p.sendBatchAsync(batch) + } + + return batch +} + +// Resizing slice doesn't make a copy of the underlying array and hence memory is not +// garbage collected. (http://blog.golang.org/go-slices-usage-and-internals) +func (p *Processor) flushBatch(batch []*tracesegment.TraceSegment) []*tracesegment.TraceSegment { + for i := 0; i < len(batch); i++ { + batch[i] = nil + } + batch = batch[0:0] + + return batch +} + +func (p *Processor) sendBatchAsync(batch []*tracesegment.TraceSegment) []*tracesegment.TraceSegment { + log.Debugf("processor: segment batch size: %d. capacity: %d", len(batch), cap(batch)) + + segmentDocuments := []*string{} + for _, segment := range batch { + rawBytes := *segment.Raw + x := string(rawBytes[:]) + segmentDocuments = append(segmentDocuments, &x) + p.pool.Return(segment.PoolBuf) + } + p.traceSegmentsBatch.send(segmentDocuments) + // Reset Idle Timer + p.SetIdleTimer() + return p.flushBatch(batch) +} + +// ProcessedCount returns number of trace segment received. +func (p *Processor) ProcessedCount() uint64 { + return atomic.LoadUint64(&p.count) +} + +// SetIdleTimer sets idle timer for the processor instance. 
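+// When the timer fires before a full batch is assembled, poll() sends whatever
+// has accumulated as a partial batch; otherwise it simply re-arms the timer.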
+func (p *Processor) SetIdleTimer() { + p.idleTimer = p.timerClient.After(p.sendIdleTimeout) +} diff --git a/daemon/processor/processor_test.go b/daemon/processor/processor_test.go new file mode 100644 index 0000000..523b85b --- /dev/null +++ b/daemon/processor/processor_test.go @@ -0,0 +1,293 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package processor + +import ( + "fmt" + "strings" + "testing" + "time" + "github.com/aws/aws-xray-daemon/daemon/bufferpool" + "github.com/aws/aws-xray-daemon/daemon/ringbuffer" + "github.com/aws/aws-xray-daemon/daemon/telemetry" + "github.com/aws/aws-xray-daemon/daemon/tracesegment" + "github.com/aws/aws-xray-daemon/daemon/util/test" + + "github.com/stretchr/testify/assert" +) + +func init() { + telemetry.T = telemetry.GetTestTelemetry() +} + +func TestFlushBatch(t *testing.T) { + variousTests := []int{0, 10, 100, 324} + for _, testCase := range variousTests { + processor := Processor{} + segments := make([]*tracesegment.TraceSegment, testCase) + for i := 0; i < testCase; i++ { + segmentVal := tracesegment.GetTestTraceSegment() + segments[i] = &segmentVal + } + + segmentsFlushed := processor.flushBatch(segments) + + assert.Equal(t, len(segmentsFlushed), 0) + assert.Equal(t, cap(segmentsFlushed), testCase) + for _, segmentVal := range segmentsFlushed { + assert.Nil(t, segmentVal) + } + } +} + +func TestSendBatchSuccess(t *testing.T) { + timer := test.MockTimerClient{} + variousTests := []int{0, 50, 40} + for _, testCase := range variousTests { + writer := test.LogSetup() + segments := make([]*tracesegment.TraceSegment, testCase) + for i := 0; i < testCase; i++ { + segmentVal := tracesegment.GetTestTraceSegment() + segments[i] = &segmentVal + } + processor := Processor{ + pool: bufferpool.Init(testCase+1, 100), + timerClient: &timer, + traceSegmentsBatch: &segmentsBatch{ + batches: make(chan []*string, 1), + }, + } + // Empty Pool + for i := 0; i < testCase+1; i++ { + processor.pool.Get() + } + assert.EqualValues(t, processor.pool.CurrentBuffersLen(), 0) + + returnedSegment := processor.sendBatchAsync(segments) + + assert.EqualValues(t, cap(returnedSegment), cap(segments)) + assert.EqualValues(t, len(returnedSegment), 0) + for _, segmentVal := range returnedSegment { + assert.Nil(t, segmentVal) + } + assert.True(t, strings.Contains(writer.Logs[0], fmt.Sprintf("segment batch size: %v", testCase))) + select { + case batch := <-processor.traceSegmentsBatch.batches: + assert.NotNil(t, batch) + default: + assert.Fail(t, "Expected batch to be in batch channel") + } + // Asserting the buffer pool was returned + assert.EqualValues(t, processor.pool.CurrentBuffersLen(), testCase) + } + timer.Dispose() +} + +func TestPollingFewSegmentsExit(t *testing.T) { + pool := bufferpool.Init(1, 100) + stdChan := ringbuffer.New(20, pool) + doneChan := make(chan bool) + timer := &test.MockTimerClient{} + writer := test.LogSetup() + processor := &Processor{ + timerClient: timer, + std: stdChan, + count: 0, + Done: doneChan, + pool: pool, + 
traceSegmentsBatch: &segmentsBatch{ + batches: make(chan []*string, 1), + }, + sendIdleTimeout: time.Second, + batchSize: 50, + } + + go processor.poll() + + // Increment for Send Batch to proceed + timer.IncrementDuration(time.Duration(10)) + segment := tracesegment.GetTestTraceSegment() + stdChan.Send(&segment) + stdChan.Close() + + <-processor.Done + + assert.EqualValues(t, processor.ProcessedCount(), 1) + assert.True(t, strings.Contains(writer.Logs[0], "segment batch size: 1")) + assert.True(t, strings.Contains(writer.Logs[1], "processor: done!")) + + timer.Dispose() +} + +func TestPollingFewSegmentsIdleTimeout(t *testing.T) { + pool := bufferpool.Init(1, 100) + stdChan := ringbuffer.New(20, pool) + doneChan := make(chan bool) + timer := &test.MockTimerClient{} + + writer := test.LogSetup() + processor := &Processor{ + timerClient: timer, + std: stdChan, + count: 0, + Done: doneChan, + pool: pool, + traceSegmentsBatch: &segmentsBatch{ + batches: make(chan []*string, 1), + }, + sendIdleTimeout: time.Second, + batchSize: 50, + } + + go processor.poll() + + // Sleep to process go routine initialization + time.Sleep(time.Millisecond) + // Adding segment to priChan + segment := tracesegment.GetTestTraceSegment() + stdChan.Send(&segment) + // Sleep to see to it the chan is processed before timeout is triggered + time.Sleep(time.Millisecond) + // Trigger Ideal Timeout to trigger PutSegments + timer.IncrementDuration(processor.sendIdleTimeout) + time.Sleep(time.Millisecond) + // Sleep so that time.After trigger batch send and not closing of the channel + stdChan.Close() + + <-doneChan + + assert.True(t, strings.Contains(writer.Logs[0], "sending partial batch")) + assert.True(t, strings.Contains(writer.Logs[1], "segment batch size: 1")) + assert.True(t, strings.Contains(writer.Logs[2], "processor: done!")) + + timer.Dispose() +} + +func TestPollingBatchBufferFull(t *testing.T) { + batchSize := 50 + pool := bufferpool.Init(1, 100) + // Setting stdChan to batchSize so that it does not spill over + stdChan := ringbuffer.New(batchSize, pool) + doneChan := make(chan bool) + timer := &test.MockTimerClient{} + + writer := test.LogSetup() + segmentProcessorCount := 1 + processor := &Processor{ + timerClient: timer, + std: stdChan, + count: 0, + Done: doneChan, + batchProcessorCount: segmentProcessorCount, + pool: pool, + traceSegmentsBatch: &segmentsBatch{ + batches: make(chan []*string, 1), + done: make(chan bool), + }, + batchSize: batchSize, + } + + go processor.poll() + + for i := 0; i < batchSize; i++ { + // Adding segment to priChan + segment := tracesegment.GetTestTraceSegment() + stdChan.Send(&segment) + + } + stdChan.Close() + processor.traceSegmentsBatch.done <- true + + <-doneChan + + assert.EqualValues(t, processor.ProcessedCount(), batchSize) + assert.True(t, strings.Contains(writer.Logs[0], "sending complete batch")) + assert.True(t, strings.Contains(writer.Logs[1], fmt.Sprintf("segment batch size: %v", batchSize))) + assert.True(t, strings.Contains(writer.Logs[2], "processor: done!")) + + timer.Dispose() +} + +func TestPollingBufferPoolExhaustedForcingSent(t *testing.T) { + pool := bufferpool.Init(1, 100) + batchSize := 50 + // Exhaust the buffer pool + pool.Get() + assert.EqualValues(t, pool.CurrentBuffersLen(), 0) + stdChan := ringbuffer.New(batchSize, pool) + doneChan := make(chan bool) + timer := &test.MockTimerClient{} + + writer := test.LogSetup() + segmentProcessorCount := 1 + processor := &Processor{ + timerClient: timer, + std: stdChan, + count: 0, + Done: doneChan, + 
batchProcessorCount: segmentProcessorCount, + pool: pool, + traceSegmentsBatch: &segmentsBatch{ + batches: make(chan []*string, 1), + done: make(chan bool), + }, + sendIdleTimeout: time.Second, + batchSize: batchSize, + } + + go processor.poll() + + segment := tracesegment.GetTestTraceSegment() + stdChan.Send(&segment) + stdChan.Close() + processor.traceSegmentsBatch.done <- true + + <-doneChan + + assert.EqualValues(t, processor.ProcessedCount(), 1) + assert.True(t, strings.Contains(writer.Logs[0], "sending partial batch due to load on buffer pool")) + assert.True(t, strings.Contains(writer.Logs[1], fmt.Sprintf("segment batch size: %v", 1))) + assert.True(t, strings.Contains(writer.Logs[2], "processor: done!")) + + timer.Dispose() +} + +func TestPollingIdleTimerIsInitiatedAfterElapseWithNoSegments(t *testing.T) { + timer := &test.MockTimerClient{} + pool := bufferpool.Init(1, 100) + batchSize := 50 + stdChan := ringbuffer.New(batchSize, pool) + processor := &Processor{ + Done: make(chan bool), + timerClient: timer, + std: stdChan, + pool: pool, + traceSegmentsBatch: &segmentsBatch{ + batches: make(chan []*string, 1), + }, + sendIdleTimeout: time.Second, + batchSize: batchSize, + } + + go processor.poll() + + // Sleep for routine to be initiated + time.Sleep(time.Millisecond) + // Trigger Idle Timeout + timer.IncrementDuration(processor.sendIdleTimeout) + // sleep so that routine exist after timeout is tiggered + time.Sleep(time.Millisecond) + stdChan.Close() + <-processor.Done + + // Called twice once at poll start and then after the timeout was triggered + assert.EqualValues(t, timer.AfterCalledTimes(), 2) + timer.Dispose() +} diff --git a/daemon/profiler/profiler.go b/daemon/profiler/profiler.go new file mode 100644 index 0000000..660a33f --- /dev/null +++ b/daemon/profiler/profiler.go @@ -0,0 +1,45 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package profiler + +import ( + "os" + "runtime/pprof" + + log "github.com/cihub/seelog" +) + +// EnableCPUProfile enables CPU profiling. +func EnableCPUProfile(cpuProfile *string) { + if *cpuProfile != "" { + f, err := os.Create(*cpuProfile) + if err != nil { + log.Errorf("error: %v", err) + } + pprof.StartCPUProfile(f) + log.Info("Start CPU Profiling") + } +} + +// MemSnapShot creates memory profile. +func MemSnapShot(memProfile *string) { + if *memProfile != "" { + f, err := os.Create(*memProfile) + if err != nil { + log.Errorf("Could not create memory profile: %v", err) + } + if err := pprof.WriteHeapProfile(f); err != nil { + log.Errorf("Could not write memory profile: %v", err) + } + f.Close() + log.Info("Finish memory profiling") + return + } +} diff --git a/daemon/ringbuffer/ringbuffer.go b/daemon/ringbuffer/ringbuffer.go new file mode 100644 index 0000000..de6ff0e --- /dev/null +++ b/daemon/ringbuffer/ringbuffer.go @@ -0,0 +1,81 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may not use this file except in compliance with the License. A copy of the License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and limitations under the License.
+
+package ringbuffer
+
+import (
+    log "github.com/cihub/seelog"
+
+    "os"
+    "github.com/aws/aws-xray-daemon/daemon/bufferpool"
+    "github.com/aws/aws-xray-daemon/daemon/telemetry"
+    "github.com/aws/aws-xray-daemon/daemon/tracesegment"
+)
+
+// RingBuffer is used to store trace segments received on the X-Ray daemon address.
+type RingBuffer struct {
+    // Channel used to read trace segments received on the X-Ray daemon address.
+    Channel <-chan *tracesegment.TraceSegment
+    c       chan *tracesegment.TraceSegment
+
+    // Boolean, set to true if the buffer is empty.
+    Empty bool
+
+    // Counter for truncated trace segments.
+    count uint64
+
+    // Reference to BufferPool.
+    pool *bufferpool.BufferPool
+}
+
+// New returns a new instance of RingBuffer configured with the given BufferPool.
+func New(size int, pool *bufferpool.BufferPool) *RingBuffer {
+    if size == 0 {
+        log.Error("The initial size of a queue should be larger than 0")
+        os.Exit(1)
+    }
+    channel := make(chan *tracesegment.TraceSegment, size)
+
+    return &RingBuffer{
+        Channel: channel,
+        c:       channel,
+        Empty:   false,
+        count:   0,
+        pool:    pool,
+    }
+}
+
+// Send sends trace segment s to the trace segment channel, dropping the oldest
+// buffered segment if the channel is full.
+func (r *RingBuffer) Send(s *tracesegment.TraceSegment) {
+    select {
+    case r.c <- s:
+    default:
+        var segmentTruncated *tracesegment.TraceSegment
+        select {
+        case segmentTruncated = <-r.c:
+            r.count++
+            r.pool.Return(segmentTruncated.PoolBuf)
+            log.Warn("Segment buffer is full. Dropping oldest segment document.")
+            telemetry.T.SegmentSpillover(1)
+        default:
+            log.Debug("Buffers: channel was de-queued")
+        }
+        r.Send(s)
+    }
+}
+
+// Close closes the RingBuffer.
+func (r *RingBuffer) Close() {
+    close(r.c)
+}
+
+// TruncatedCount returns the number of truncated trace segments.
+func (r *RingBuffer) TruncatedCount() uint64 {
+    return r.count
+}
diff --git a/daemon/ringbuffer/ringbuffer_test.go b/daemon/ringbuffer/ringbuffer_test.go
new file mode 100644
index 0000000..d31cf0f
--- /dev/null
+++ b/daemon/ringbuffer/ringbuffer_test.go
@@ -0,0 +1,139 @@
+// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and limitations under the License.
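A hedged aside before the tests: RingBuffer.Send above boils down to a bounded channel with drop-oldest semantics. A minimal sketch of just that pattern (illustrative only; the real method also returns the evicted segment's buffer to the pool and records spillover telemetry):

package main

import "fmt"

// send mirrors RingBuffer.Send: when the channel is full it evicts the oldest
// element and retries, so the newest items are always kept.
func send(ch chan int, v int) {
    select {
    case ch <- v:
    default:
        select {
        case old := <-ch:
            fmt.Println("dropped oldest:", old)
        default:
        }
        send(ch, v)
    }
}

func main() {
    ch := make(chan int, 3)
    for i := 1; i <= 5; i++ {
        send(ch, i)
    }
    close(ch)
    for v := range ch {
        fmt.Println("kept:", v) // 3, 4, 5
    }
}

Dropping the oldest document keeps the daemon responsive under bursts, at the cost of losing the stalest segments first.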
+ +package ringbuffer + +import ( + "math/rand" + "os" + "os/exec" + "strings" + "testing" + "github.com/aws/aws-xray-daemon/daemon/bufferpool" + "github.com/aws/aws-xray-daemon/daemon/telemetry" + "github.com/aws/aws-xray-daemon/daemon/tracesegment" + "github.com/aws/aws-xray-daemon/daemon/util/test" + + "github.com/stretchr/testify/assert" +) + +func init() { + telemetry.T = telemetry.GetTestTelemetry() +} + +func TestRingBufferNew(t *testing.T) { + bufferLimit := 100 + bufferSize := 256 * 1024 + bufferPool := bufferpool.Init(bufferLimit, bufferSize) + // Only run the failing part when a specific env variable is set + if os.Getenv("Test_New") == "1" { + New(0, bufferPool) + return + } + // Start the actual test in a different subprocess + cmd := exec.Command(os.Args[0], "-test.run=TestRingBufferNew") + cmd.Env = append(os.Environ(), "Test_New=1") + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + // Check that the program exited + err := cmd.Wait() + if e, ok := err.(*exec.ExitError); !ok || e.Success() { + t.Fatalf("Process ran with err %v, want exit status 1", err) + } + + randomFlag := rand.Intn(100) + ringBuffer := New(randomFlag, bufferPool) + + assert.Equal(t, randomFlag, cap(ringBuffer.c), "The size of buffered channel should be same with the given number") + assert.Equal(t, randomFlag, cap(ringBuffer.Channel), "The size of buffered channel should be same with the given number") + assert.Equal(t, false, ringBuffer.Empty, "The ringBuffer is not empty") + assert.Equal(t, uint64(0), ringBuffer.count, "The truncated count should be 0") + assert.Equal(t, bufferPool, ringBuffer.pool, "The value of bufferpool should be same with the given value") + +} + +func TestRingBufferCloseChannel(t *testing.T) { + bufferLimit := 100 + bufferSize := 256 * 1024 + bufferPool := bufferpool.Init(bufferLimit, bufferSize) + randomFlag := rand.Intn(100) + ringBuffer := New(randomFlag, bufferPool) + ringBuffer.Close() + for i := 0; i < cap(ringBuffer.c); i++ { + v, ok := <-ringBuffer.c + + assert.Equal(t, (*tracesegment.TraceSegment)(nil), v, "The value should be nil") + assert.Equal(t, false, ok, "The value should be false if the channel is closed") + } +} + +func TestRingBufferSend(t *testing.T) { + bufferLimit := 100 + bufferSize := 256 * 1024 + bufferPool := bufferpool.Init(bufferLimit, bufferSize) + randomFlag := rand.Intn(100) + ringBuffer := New(randomFlag, bufferPool) + segment := tracesegment.GetTestTraceSegment() + for i := 0; i < randomFlag; i++ { + ringBuffer.Send(&segment) + } + for i := 0; i < cap(ringBuffer.c); i++ { + v, ok := <-ringBuffer.c + + assert.Equal(t, &segment, v, "The value should be same with the send segment") + assert.Equal(t, true, ok, "The channel is open") + } +} + +func TestRingBufferTruncatedCount(t *testing.T) { + log := test.LogSetup() + bufferLimit := 100 + bufferSize := 256 * 1024 + bufferPool := bufferpool.Init(bufferLimit, bufferSize) + segment := tracesegment.GetTestTraceSegment() + ringBuffer := New(100, bufferPool) + randomFlag := rand.Intn(100) + for i := 0; i < 100+randomFlag; i++ { + ringBuffer.Send(&segment) + } + num := ringBuffer.TruncatedCount() + + assert.Equal(t, num, uint64(randomFlag), "The truncated count should be same with the generated random number") + for i := 0; i < randomFlag; i++ { + assert.True(t, strings.Contains(log.Logs[i], "Segment buffer is full. 
Dropping oldest segment document.")) + } +} + +func TestRingBufferSendTruncated(t *testing.T) { + log := test.LogSetup() + bufferLimit := 100 + bufferSize := 256 * 1024 + bufferPool := bufferpool.Init(bufferLimit, bufferSize) + randomFlag := rand.Intn(100) + 2 + ringBuffer := New(randomFlag, bufferPool) + var segment []tracesegment.TraceSegment + for i := 0; i < randomFlag; i++ { + segment = append(segment, tracesegment.GetTestTraceSegment()) + ringBuffer.Send(&segment[i]) + } + s1 := tracesegment.GetTestTraceSegment() + ringBuffer.Send(&s1) + + assert.Equal(t, &segment[1], <-ringBuffer.c, "Truncate the first segment in the original buffered channel") + assert.Equal(t, randomFlag, cap(ringBuffer.c), "The buffered channel still full after truncating") + assert.True(t, strings.Contains(log.Logs[0], "Segment buffer is full. Dropping oldest segment document.")) + + s2 := tracesegment.GetTestTraceSegment() + ringBuffer.Send(&s2) + + assert.Equal(t, &segment[2], <-ringBuffer.c, "Truncate the second segment that in the original buffered channel") + assert.Equal(t, randomFlag, cap(ringBuffer.c), "The buffered channel still full after truncating") + assert.True(t, strings.Contains(log.Logs[0], "Segment buffer is full. Dropping oldest segment document.")) +} diff --git a/daemon/socketconn/socketconn.go b/daemon/socketconn/socketconn.go new file mode 100644 index 0000000..40588b9 --- /dev/null +++ b/daemon/socketconn/socketconn.go @@ -0,0 +1,19 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package socketconn + +// SocketConn is an interface for socket connection. +type SocketConn interface { + // Reads a packet from the connection, copying the payload into b. It returns number of bytes copied. + Read(b []byte) (int, error) + + // Closes the connection. + Close() +} diff --git a/daemon/socketconn/udp/udp.go b/daemon/socketconn/udp/udp.go new file mode 100644 index 0000000..d71d232 --- /dev/null +++ b/daemon/socketconn/udp/udp.go @@ -0,0 +1,57 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package udp + +import "net" +import ( + "os" + "github.com/aws/aws-xray-daemon/daemon/socketconn" + + log "github.com/cihub/seelog" +) + +// UDP defines UDP socket connection. +type UDP struct { + socket *net.UDPConn +} + +// New returns new instance of UDP. 
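+// It resolves and binds udpAddress, sets the kernel receive buffer to
+// receiveBufferSize bytes, and exits the process if any of those steps fail.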
+func New(receiveBufferSize int, udpAddress string) socketconn.SocketConn {
+    log.Debugf("Listening on UDP %v", udpAddress)
+    addr, err := net.ResolveUDPAddr("udp", udpAddress)
+    if err != nil {
+        log.Errorf("%v", err)
+        os.Exit(1)
+    }
+    sock, err := net.ListenUDP("udp", addr)
+    if err != nil {
+        log.Errorf("%v", err)
+        os.Exit(1)
+    }
+    err = sock.SetReadBuffer(receiveBufferSize)
+    if err != nil {
+        log.Errorf("%v", err)
+        os.Exit(1)
+    }
+    return UDP{
+        socket: sock,
+    }
+}
+
+// Read returns the number of bytes read from the UDP connection.
+func (conn UDP) Read(b []byte) (int, error) {
+    rlen, _, err := conn.socket.ReadFromUDP(b)
+    return rlen, err
+}
+
+// Close closes the current UDP connection.
+func (conn UDP) Close() {
+    conn.socket.Close()
+}
diff --git a/daemon/telemetry/telemetry.go b/daemon/telemetry/telemetry.go
new file mode 100644
index 0000000..2bd1243
--- /dev/null
+++ b/daemon/telemetry/telemetry.go
@@ -0,0 +1,338 @@
+// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and limitations under the License.
+
+package telemetry
+
+import (
+    "sync/atomic"
+    "time"
+    "github.com/aws/aws-xray-daemon/daemon/conn"
+    "github.com/aws/aws-xray-daemon/daemon/util/timer"
+    "unsafe"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/ec2metadata"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/xray"
+    log "github.com/cihub/seelog"
+)
+
+const dataCutoffIntervalSecs = 60
+const bufferSize = 30
+const requestSize = 10
+
+// T is the package-level instance of Telemetry.
+var T *Telemetry
+
+// Telemetry is used to record X-Ray daemon health.
+type Telemetry struct {
+    // Instance of XRay.
+    client conn.XRay
+    timer  timer.Timer
+
+    // Amazon Resource Name (ARN) of the AWS resource running the daemon.
+    resourceARN string
+
+    // Instance id of the EC2 instance running the X-Ray daemon.
+    instanceID string
+
+    // Host name of the EC2 instance running the X-Ray daemon.
+    hostname string
+
+    // Telemetry record currently being accumulated.
+    currentRecord *xray.TelemetryRecord
+
+    // Timer channel.
+    timerChan <-chan time.Time
+
+    // Boolean channel, set to true when Quit channel is set to true.
+    Done chan bool
+
+    // Boolean channel, set to true when the daemon is closed.
+    Quit chan bool
+
+    // Channel of TelemetryRecord used to send to X-Ray service.
+    recordChan chan *xray.TelemetryRecord
+
+    // postTelemetry is set to true once a segment has been received,
+    // indicating that telemetry data should be sent for received segments.
+    postTelemetry bool
+}
+
+// Init instantiates a new instance of Telemetry.
+func Init(awsConfig *aws.Config, s *session.Session, resourceARN string, noMetadata bool) {
+    T = newT(awsConfig, s, resourceARN, noMetadata)
+    log.Debug("Telemetry initiated")
+}
+
+// EvaluateConnectionError classifies a connection error based on the request failure status code.
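+// HTTP 5xx and 4xx responses, timeouts, and "RequestError" (unknown host)
+// failures each increment a dedicated counter; most other errors are recorded
+// under the generic "other" connection-error count.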
+func EvaluateConnectionError(err error) { + requestFailure, ok := err.(awserr.RequestFailure) + if ok { + statusCode := requestFailure.StatusCode() + if statusCode >= 500 && statusCode < 600 { + T.Connection5xx(1) + } else if statusCode >= 400 && statusCode < 500 { + T.Connection4xx(1) + } else { + T.ConnectionOther(1) + } + } else { + if conn.IsTimeoutError(err) { + T.ConnectionTimeout(1) + } else { + awsError, ok := err.(awserr.Error) + if ok { + if awsError.Code() == "RequestError" { + T.ConnectionUnknownHost(1) + } + } else { + T.ConnectionOther(1) + } + } + } +} + +// GetTestTelemetry returns an empty telemetry record. +func GetTestTelemetry() *Telemetry { + return &Telemetry{ + currentRecord: getEmptyTelemetryRecord(), + } +} + +// SegmentReceived increments SegmentsReceivedCount for the Telemetry record. +func (t *Telemetry) SegmentReceived(count int64) { + atomic.AddInt64(t.currentRecord.SegmentsReceivedCount, count) + // Only send telemetry data when we receive any segment or else skip any telemetry data + t.postTelemetry = true +} + +// SegmentSent increments SegmentsSentCount for the Telemetry record. +func (t *Telemetry) SegmentSent(count int64) { + atomic.AddInt64(t.currentRecord.SegmentsSentCount, count) +} + +// SegmentSpillover increments SegmentsSpilloverCount for the Telemetry record. +func (t *Telemetry) SegmentSpillover(count int64) { + atomic.AddInt64(t.currentRecord.SegmentsSpilloverCount, count) +} + +// SegmentRejected increments SegmentsRejectedCount for the Telemetry record. +func (t *Telemetry) SegmentRejected(count int64) { + atomic.AddInt64(t.currentRecord.SegmentsRejectedCount, count) +} + +// ConnectionTimeout increments TimeoutCount for the Telemetry record. +func (t *Telemetry) ConnectionTimeout(count int64) { + atomic.AddInt64(t.currentRecord.BackendConnectionErrors.TimeoutCount, count) +} + +// ConnectionRefusal increments ConnectionRefusedCount for the Telemetry record. +func (t *Telemetry) ConnectionRefusal(count int64) { + atomic.AddInt64(t.currentRecord.BackendConnectionErrors.ConnectionRefusedCount, count) +} + +// Connection4xx increments HTTPCode4XXCount for the Telemetry record. +func (t *Telemetry) Connection4xx(count int64) { + atomic.AddInt64(t.currentRecord.BackendConnectionErrors.HTTPCode4XXCount, count) +} + +// Connection5xx increments HTTPCode5XXCount count for the Telemetry record. +func (t *Telemetry) Connection5xx(count int64) { + atomic.AddInt64(t.currentRecord.BackendConnectionErrors.HTTPCode5XXCount, count) +} + +// ConnectionUnknownHost increments unknown host BackendConnectionErrors count for the Telemetry record. +func (t *Telemetry) ConnectionUnknownHost(count int64) { + atomic.AddInt64(t.currentRecord.BackendConnectionErrors.UnknownHostCount, count) +} + +// ConnectionOther increments other BackendConnectionErrors count for the Telemetry record. 
+func (t *Telemetry) ConnectionOther(count int64) { + atomic.AddInt64(t.currentRecord.BackendConnectionErrors.OtherCount, count) +} + +func newT(awsConfig *aws.Config, s *session.Session, resourceARN string, noMetadata bool) *Telemetry { + timer := &timer.Client{} + hostname := "" + instanceID := "" + if !noMetadata { + metadataClient := ec2metadata.New(s) + hn, err := metadataClient.GetMetadata("hostname") + if err != nil { + log.Debugf("Get hostname metadata failed: %s", err) + } else { + hostname = hn + log.Debugf("Using %v hostname for telemetry records", hostname) + } + instID, err := metadataClient.GetMetadata("instance-id") + if err != nil { + log.Errorf("Get instance id metadata failed: %s", err) + } else { + instanceID = instID + log.Debugf("Using %v Instance Id for Telemetry records", instanceID) + } + } else { + log.Debug("No Metadata set for telemetry records") + } + record := getEmptyTelemetryRecord() + t := &Telemetry{ + timer: timer, + resourceARN: resourceARN, + instanceID: instanceID, + hostname: hostname, + currentRecord: record, + timerChan: getDataCutoffDelay(timer), + Done: make(chan bool), + Quit: make(chan bool), + recordChan: make(chan *xray.TelemetryRecord, bufferSize), + postTelemetry: false, + } + telemetryClient := conn.NewXRay(awsConfig, s) + t.client = telemetryClient + go t.pushData() + return t +} + +func getZeroInt64() *int64 { + var zero int64 + zero = 0 + return &zero +} + +func getEmptyTelemetryRecord() *xray.TelemetryRecord { + return &xray.TelemetryRecord{ + SegmentsReceivedCount: getZeroInt64(), + SegmentsRejectedCount: getZeroInt64(), + SegmentsSentCount: getZeroInt64(), + SegmentsSpilloverCount: getZeroInt64(), + BackendConnectionErrors: &xray.BackendConnectionErrors{ + HTTPCode4XXCount: getZeroInt64(), + HTTPCode5XXCount: getZeroInt64(), + ConnectionRefusedCount: getZeroInt64(), + OtherCount: getZeroInt64(), + TimeoutCount: getZeroInt64(), + UnknownHostCount: getZeroInt64(), + }, + } +} + +func (t *Telemetry) pushData() { + for { + quit := false + select { + case <-t.Quit: + quit = true + break + case <-t.timerChan: + } + emptyRecord := getEmptyTelemetryRecord() + recordToReport := unsafe.Pointer(emptyRecord) + recordToPushPointer := unsafe.Pointer(t.currentRecord) + // Rotation Logic: + // Swap current record to record to report. + // Record to report is set to empty record which is set to current record + t.currentRecord = (*xray.TelemetryRecord)(atomic.SwapPointer(&recordToReport, + recordToPushPointer)) + currentTime := time.Now() + record := (*xray.TelemetryRecord)(recordToReport) + record.Timestamp = ¤tTime + t.add(record) + t.sendAll() + if quit { + close(t.recordChan) + log.Debug("telemetry: done!") + t.Done <- true + break + } else { + t.timerChan = getDataCutoffDelay(t.timer) + } + } +} + +func (t *Telemetry) add(record *xray.TelemetryRecord) { + // Only send telemetry data when we receive first segment or else do not send any telemetry data. + if t.postTelemetry { + select { + case t.recordChan <- record: + default: + select { + case <-t.recordChan: + log.Debug("Telemetry Buffers truncated") + t.add(record) + default: + log.Debug("Telemetry Buffers dequeued") + } + } + } else { + log.Debug("Skipped telemetry data as no segments found") + } +} + +func (t *Telemetry) sendAll() { + records := t.collectAllRecords() + recordsNoSend, err := t.sendRecords(records) + if err != nil { + log.Debugf("Failed to send telemetry %v record(s). Re-queue records. 
%v", len(records), err) + // There might be possibility that new records might be archived during re-queue records. + // But as timer is set after records are send this will not happen + for _, record := range recordsNoSend { + t.add(record) + } + } +} + +func (t *Telemetry) collectAllRecords() []*xray.TelemetryRecord { + records := make([]*xray.TelemetryRecord, bufferSize) + records = records[:0] + var record *xray.TelemetryRecord + done := false + for !done { + select { + case record = <-t.recordChan: + recordLen := len(records) + if recordLen < bufferSize { + records = append(records, record) + } + default: + done = true + } + } + return records +} + +func (t *Telemetry) sendRecords(records []*xray.TelemetryRecord) ([]*xray.TelemetryRecord, error) { + if len(records) > 0 { + for i := 0; i < len(records); i = i + requestSize { + endIndex := len(records) + if endIndex > i+requestSize { + endIndex = i + requestSize + } + recordsToSend := records[i:endIndex] + input := xray.PutTelemetryRecordsInput{ + EC2InstanceId: &t.instanceID, + Hostname: &t.hostname, + ResourceARN: &t.resourceARN, + TelemetryRecords: recordsToSend, + } + _, err := t.client.PutTelemetryRecords(&input) + if err != nil { + EvaluateConnectionError(err) + return records[i:], err + } + } + log.Debugf("Send %v telemetry record(s)", len(records)) + } + return nil, nil +} + +func getDataCutoffDelay(timer timer.Timer) <-chan time.Time { + return timer.After(time.Duration(time.Second * dataCutoffIntervalSecs)) +} diff --git a/daemon/telemetry/telemetry_test.go b/daemon/telemetry/telemetry_test.go new file mode 100644 index 0000000..b4ab09f --- /dev/null +++ b/daemon/telemetry/telemetry_test.go @@ -0,0 +1,156 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. 
+ +package telemetry + +import ( + "errors" + "fmt" + "strings" + "testing" + "github.com/aws/aws-xray-daemon/daemon/util/test" + + "github.com/aws/aws-sdk-go/service/xray" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +type MockXRayClient struct { + mock.Mock + CallNoToPutTelemetryRecords int +} + +func (c *MockXRayClient) PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) { + return nil, nil +} + +func (c *MockXRayClient) PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) { + c.CallNoToPutTelemetryRecords++ + args := c.Called(nil) + errorStr := args.String(0) + var err error + output := &xray.PutTelemetryRecordsOutput{} + if errorStr != "" { + err = errors.New(errorStr) + } + return output, err +} + +func TestGetEmptyTelemetryRecord(t *testing.T) { + emptyRecord := getEmptyTelemetryRecord() + + assert.EqualValues(t, emptyRecord.SegmentsReceivedCount, new(int64)) + assert.EqualValues(t, emptyRecord.SegmentsRejectedCount, new(int64)) + assert.EqualValues(t, emptyRecord.SegmentsSentCount, new(int64)) + assert.EqualValues(t, emptyRecord.SegmentsSpilloverCount, new(int64)) + assert.EqualValues(t, emptyRecord.BackendConnectionErrors.ConnectionRefusedCount, new(int64)) + assert.EqualValues(t, emptyRecord.BackendConnectionErrors.HTTPCode4XXCount, new(int64)) + assert.EqualValues(t, emptyRecord.BackendConnectionErrors.HTTPCode5XXCount, new(int64)) + assert.EqualValues(t, emptyRecord.BackendConnectionErrors.OtherCount, new(int64)) + assert.EqualValues(t, emptyRecord.BackendConnectionErrors.TimeoutCount, new(int64)) + assert.EqualValues(t, emptyRecord.BackendConnectionErrors.UnknownHostCount, new(int64)) +} + +func TestAddTelemetryRecord(t *testing.T) { + log := test.LogSetup() + timer := &test.MockTimerClient{} + telemetry := &Telemetry{ + client: &MockXRayClient{}, + timer: timer, + resourceARN: "", + instanceID: "", + hostname: "", + currentRecord: getEmptyTelemetryRecord(), + timerChan: getDataCutoffDelay(timer), + Done: make(chan bool), + Quit: make(chan bool), + recordChan: make(chan *xray.TelemetryRecord, 1), + postTelemetry: true, + } + + telemetry.add(getEmptyTelemetryRecord()) + telemetry.add(getEmptyTelemetryRecord()) + + assert.True(t, strings.Contains(log.Logs[0], "Telemetry Buffers truncated")) +} + +func TestSendRecordSuccess(t *testing.T) { + log := test.LogSetup() + xRay := new(MockXRayClient) + xRay.On("PutTelemetryRecords", nil).Return("").Once() + timer := &test.MockTimerClient{} + telemetry := &Telemetry{ + client: xRay, + timer: timer, + resourceARN: "", + instanceID: "", + hostname: "", + currentRecord: getEmptyTelemetryRecord(), + timerChan: getDataCutoffDelay(timer), + Done: make(chan bool), + Quit: make(chan bool), + recordChan: make(chan *xray.TelemetryRecord, 1), + } + records := make([]*xray.TelemetryRecord, 1) + records[0] = getEmptyTelemetryRecord() + telemetry.sendRecords(records) + + assert.EqualValues(t, xRay.CallNoToPutTelemetryRecords, 1) + assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Send %v telemetry record(s)", 1))) +} + +func TestAddRecordWithPostSegmentFalse(t *testing.T) { + log := test.LogSetup() + timer := &test.MockTimerClient{} + telemetry := &Telemetry{ + client: &MockXRayClient{}, + timer: timer, + resourceARN: "", + instanceID: "", + hostname: "", + currentRecord: getEmptyTelemetryRecord(), + timerChan: getDataCutoffDelay(timer), + Done: make(chan bool), + Quit: make(chan bool), + recordChan: make(chan 
*xray.TelemetryRecord, 1), + } + + telemetry.add(getEmptyTelemetryRecord()) + + assert.True(t, strings.Contains(log.Logs[0], "Skipped telemetry data as no segments found")) +} + +func TestAddRecordBeforeFirstSegmentAndAfter(t *testing.T) { + log := test.LogSetup() + timer := &test.MockTimerClient{} + telemetry := &Telemetry{ + client: &MockXRayClient{}, + timer: timer, + resourceARN: "", + instanceID: "", + hostname: "", + currentRecord: getEmptyTelemetryRecord(), + timerChan: getDataCutoffDelay(timer), + Done: make(chan bool), + Quit: make(chan bool), + recordChan: make(chan *xray.TelemetryRecord, 1), + } + + // No Segment received + telemetry.add(getEmptyTelemetryRecord()) + + assert.True(t, strings.Contains(log.Logs[0], "Skipped telemetry data as no segments found")) + + // Segment received + telemetry.SegmentReceived(1) + telemetry.add(getEmptyTelemetryRecord()) + telemetry.add(getEmptyTelemetryRecord()) + + assert.True(t, strings.Contains(log.Logs[1], "Telemetry Buffers truncated")) +} diff --git a/daemon/tracesegment/tracesegment.go b/daemon/tracesegment/tracesegment.go new file mode 100644 index 0000000..bfe8929 --- /dev/null +++ b/daemon/tracesegment/tracesegment.go @@ -0,0 +1,45 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package tracesegment + +import ( + "bytes" + "compress/zlib" + "strings" +) + +// Header stores header of trace segment. +type Header struct { + Format string `json:"format"` + Version int `json:"version"` +} + +// IsValid validates Header. +func (t Header) IsValid() bool { + return strings.EqualFold(t.Format, "json") && t.Version == 1 +} + +// TraceSegment stores raw segment. +type TraceSegment struct { + Raw *[]byte + PoolBuf *[]byte +} + +// Deflate converts TraceSegment to bytes +func (r *TraceSegment) Deflate() []byte { + var b bytes.Buffer + + w := zlib.NewWriter(&b) + rawBytes := *r.Raw + w.Write(rawBytes) + w.Close() + + return b.Bytes() +} diff --git a/daemon/tracesegment/tracesegment_test.go b/daemon/tracesegment/tracesegment_test.go new file mode 100644 index 0000000..14e4087 --- /dev/null +++ b/daemon/tracesegment/tracesegment_test.go @@ -0,0 +1,102 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. 
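For context on Deflate above: it runs the raw segment bytes through compress/zlib, and TestDeflateWithValidInput below reverses that with zlib.NewReader. The following standalone sketch shows the same round trip under those assumptions; the JSON payload is made up for illustration and is not a real segment.

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io/ioutil"
)

func main() {
	raw := []byte(`{"trace_id": "example-trace", "id": "example-segment"}`)

	// Compress, mirroring TraceSegment.Deflate: write the raw bytes through
	// a zlib writer into an in-memory buffer (errors ignored as in Deflate).
	var compressed bytes.Buffer
	w := zlib.NewWriter(&compressed)
	w.Write(raw)
	w.Close()

	// Decompress to verify the round trip, as the unit test below does.
	r, err := zlib.NewReader(bytes.NewReader(compressed.Bytes()))
	if err != nil {
		panic(err)
	}
	recovered, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	r.Close()

	fmt.Println(bytes.Equal(raw, recovered)) // true
}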
+ +package tracesegment + +import ( + "bytes" + "compress/zlib" + "io" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDeflateWithValidInput(t *testing.T) { + testSegment := GetTestTraceSegment() + + deflatedBytes := testSegment.Deflate() + rawBytes := *testSegment.Raw + + assert.True(t, len(rawBytes) > len(deflatedBytes), "Deflated bytes should compress raw bytes") + + // Testing reverting compression using zlib + deflatedBytesBuffer := bytes.NewBuffer(deflatedBytes) + reader, err := zlib.NewReader(deflatedBytesBuffer) + if err != nil { + panic(err) + } + var deflatedBytesRecovered = make([]byte, 1000) + n, err := reader.Read(deflatedBytesRecovered) + if err != nil && err != io.EOF { + panic(err) + } + deflatedBytesRecovered = deflatedBytesRecovered[:n] + + assert.Equal(t, n, len(deflatedBytesRecovered)) + assert.Equal(t, len(deflatedBytesRecovered), len(rawBytes)) + for index, byteVal := range rawBytes { + assert.Equal(t, byteVal, deflatedBytesRecovered[index], "Difference in recovered and original bytes") + } +} + +func TestTraceSegmentHeaderIsValid(t *testing.T) { + header := Header{ + Format: "json", + Version: 1, + } + + valid := header.IsValid() + + assert.True(t, valid) +} + +func TestTraceSegmentHeaderIsValidCaseInsensitive(t *testing.T) { + header := Header{ + Format: "jSoN", + Version: 1, + } + + valid := header.IsValid() + + assert.True(t, valid) +} + +func TestTraceSegmentHeaderIsValidWrongVersion(t *testing.T) { + header := Header{ + Format: "json", + Version: 2, + } + + valid := header.IsValid() + + assert.False(t, valid) +} + +func TestTraceSegmentHeaderIsValidWrongFormat(t *testing.T) { + header := Header{ + Format: "xml", + Version: 1, + } + + valid := header.IsValid() + + assert.False(t, valid) +} + +func TestTraceSegmentHeaderIsValidWrongFormatVersion(t *testing.T) { + header := Header{ + Format: "xml", + Version: 2, + } + + valid := header.IsValid() + + assert.False(t, valid) +} diff --git a/daemon/tracesegment/tracesegment_test_util.go b/daemon/tracesegment/tracesegment_test_util.go new file mode 100644 index 0000000..a6e2f8a --- /dev/null +++ b/daemon/tracesegment/tracesegment_test_util.go @@ -0,0 +1,34 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package tracesegment + +import ( + "fmt" + "math/rand" +) + +// GetTestTraceSegment returns new instance of TraceSegment used for testing. 
+func GetTestTraceSegment() TraceSegment { + traceRandomNumber := rand.Int() + segmentRandomNumber := rand.Int() + message := fmt.Sprintf("{\"trace_id\": \"%v\", \"id\": \"%v\", \"start_time\": 1461096053.37518, "+ + "\"end_time\": 1461096053.4042, "+ + "\"name\": \"hello-1.mbfzqxzcpe.us-east-1.elasticbeanstalk.com\"}", + traceRandomNumber, + segmentRandomNumber) + buf := make([]byte, 100) + messageBytes := []byte(message) + + segment := TraceSegment{ + PoolBuf: &buf, + Raw: &messageBytes, + } + return segment +} diff --git a/daemon/tracing.go b/daemon/tracing.go new file mode 100644 index 0000000..028f83c --- /dev/null +++ b/daemon/tracing.go @@ -0,0 +1,37 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package main + +import ( + "os" + "os/signal" + "syscall" + "time" + + log "github.com/cihub/seelog" +) + +func (d *Daemon) blockSignalReceived() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, os.Kill) + s := <-sigs + log.Debugf("Shutdown Initiated. Current epoch in nanoseconds: %v", time.Now().UnixNano()) + log.Infof("Got shutdown signal: %v", s) + d.stop() +} + +func main() { + d := initDaemon(config) + defer d.close() + go func() { + d.blockSignalReceived() + }() + runDaemon(d) +} diff --git a/daemon/tracing_windows.go b/daemon/tracing_windows.go new file mode 100644 index 0000000..b103cb3 --- /dev/null +++ b/daemon/tracing_windows.go @@ -0,0 +1,64 @@ +// Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. + +package main + +import ( + "time" + + "golang.org/x/sys/windows/svc" +) + +const serviceName = "AmazonX-RayDaemon" + +func main() { + svc.Run(serviceName, &TracingDaemonService{}) +} + +// Structure for X-Ray daemon as a service. +type TracingDaemonService struct{} + +// Execute xray as Windows service. Implement golang.org/x/sys/windows/svc#Handler. 
+func (a *TracingDaemonService) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { + + // notify service controller status is now StartPending + s <- svc.Status{State: svc.StartPending} + + // start service + d := initDaemon(config) + // Start a routine to monitor all channels/routines initiated are closed + // This is required for windows as windows daemon wait for process to finish using infinite for loop below + go d.close() + runDaemon(d) + // update service status to Running + const acceptCmds = svc.AcceptStop | svc.AcceptShutdown + s <- svc.Status{State: svc.Running, Accepts: acceptCmds} +loop: + // using an infinite loop to wait for ChangeRequests + for { + // block and wait for ChangeRequests + c := <-r + + // handle ChangeRequest, svc.Pause is not supported + switch c.Cmd { + case svc.Interrogate: + s <- c.CurrentStatus + // Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4 + time.Sleep(100 * time.Millisecond) + s <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + break loop + default: + continue loop + } + } + s <- svc.Status{State: svc.StopPending} + d.stop() + return false, 0 +} diff --git a/daemon/util/test/log_writer.go b/daemon/util/test/log_writer.go new file mode 100644 index 0000000..97cbf73 --- /dev/null +++ b/daemon/util/test/log_writer.go @@ -0,0 +1,27 @@ +package test + +import ( + log "github.com/cihub/seelog" +) + +// LogWriter defines structure for log writer. +type LogWriter struct { + Logs []string +} + +// Write writes p bytes to log writer. +func (sw *LogWriter) Write(p []byte) (n int, err error) { + sw.Logs = append(sw.Logs, string(p)) + return len(p), nil +} + +// LogSetup initializes log writer. +func LogSetup() *LogWriter { + writer := &LogWriter{} + logger, err := log.LoggerFromWriterWithMinLevelAndFormat(writer, log.TraceLvl, "%Ns [%Level] %Msg") + if err != nil { + panic(err) + } + log.ReplaceLogger(logger) + return writer +} diff --git a/daemon/util/test/mock_timer_client.go b/daemon/util/test/mock_timer_client.go new file mode 100644 index 0000000..911e66a --- /dev/null +++ b/daemon/util/test/mock_timer_client.go @@ -0,0 +1,107 @@ +package test + +import ( + "sync/atomic" + "time" +) + +// MockTimerClient is a mock for timer client. +type MockTimerClient struct { + afterCalled int32 + tickCalled int32 + currentTime int64 + killAll bool + killRoutinesCount int32 + doneChannel []chan bool + update []chan bool +} + +// IncrementDuration increments mock timer by d duration. +func (t *MockTimerClient) IncrementDuration(d time.Duration) { + atomic.AddInt64(&t.currentTime, int64(d)) + for _, update := range t.update { + update <- true + } + for _, done := range t.doneChannel { + <-done + } +} + +// Dispose kills all mock timer clients. +func (t *MockTimerClient) Dispose() { + t.killAll = true + for _, update := range t.update { + update <- true + } + for _, done := range t.doneChannel { + <-done + } +} + +// TickRoutine is a routine for Timer.Tick(). +func (t *MockTimerClient) TickRoutine(d int64, c chan time.Time, done chan bool, update <-chan bool, startOfTick int64) { + for { + <-update + if t.killAll { + break + } + currentDuration := atomic.LoadInt64(&t.currentTime) + divisor := (currentDuration - startOfTick) / d + if d > 0 && divisor >= 1 { + var i int64 + for i = 0; i < divisor; i++ { + t := time.Now() + c <- t + } + startOfTick = startOfTick + divisor*d + } + done <- true + } + atomic.AddInt32(&t.killRoutinesCount, 1) + done <- true +} + +// AfterRoutine is a routine for Timer.After(). 
+func (t *MockTimerClient) AfterRoutine(d int64, c chan time.Time, done chan bool, startOfAfter int64) { + for !t.killAll { + currentDuration := atomic.LoadInt64(&t.currentTime) + if d > 0 && (currentDuration-startOfAfter)/d >= 1 { + c <- time.Now() + break + } + } + atomic.AddInt32(&t.killRoutinesCount, 1) +} + +// Tick mocks Timer.Tick(). +func (t *MockTimerClient) Tick(d time.Duration) <-chan time.Time { + // We use done and update channels on tick because it is long going process and infinite loop will + // consume lot of CPU + c := make(chan time.Time, 10) + done := make(chan bool, 1) + update := make(chan bool, 1) + go t.TickRoutine(int64(d), c, done, update, atomic.LoadInt64(&t.currentTime)) + t.doneChannel = append(t.doneChannel, done) + t.update = append(t.update, update) + atomic.AddInt32(&t.tickCalled, 1) + return c +} + +// After mocks Timer.After(). +func (t *MockTimerClient) After(d time.Duration) <-chan time.Time { + c := make(chan time.Time, 10) + done := make(chan bool, 1) + go t.AfterRoutine(int64(d), c, done, atomic.LoadInt64(&t.currentTime)) + atomic.AddInt32(&t.afterCalled, 1) + return c +} + +// AfterCalledTimes calculates number of times after is called. +func (t *MockTimerClient) AfterCalledTimes() int32 { + return atomic.LoadInt32(&t.afterCalled) +} + +// TickCalledTimes calculates number of times tick is called. +func (t *MockTimerClient) TickCalledTimes() int32 { + return atomic.LoadInt32(&t.tickCalled) +} diff --git a/daemon/util/test/mock_timer_client_test.go b/daemon/util/test/mock_timer_client_test.go new file mode 100644 index 0000000..08cbb67 --- /dev/null +++ b/daemon/util/test/mock_timer_client_test.go @@ -0,0 +1,250 @@ +package test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type EmptyStruct struct { +} + +func ChannelHasData(c chan EmptyStruct) bool { + var ok bool + select { + case <-c: + ok = true + default: + ok = false + + } + return ok +} + +// This function is used so that test cases will not freeze if chan is not responsive +func TryToGetValue(ch chan EmptyStruct) *EmptyStruct { + timeout := make(chan bool, 1) + go func() { + time.Sleep(100 * time.Millisecond) + timeout <- true + }() + select { + case <-ch: + return &EmptyStruct{} + case <-timeout: + return nil + } +} + +func TickTestHelper(tickDuration int64, t *testing.T) { + timer := MockTimerClient{currentTime: 35534432431} + tickChan := make(chan EmptyStruct, 1) + tickFunc := func() { + // Go routine started + tickChan <- EmptyStruct{} + t := timer.Tick(time.Duration(tickDuration)) + for { + <-t + tickChan <- EmptyStruct{} + } + } + + go tickFunc() + + // Go routine to monitor tick started + <-tickChan + testCasesTicksToTrigger := []int{1, 2, 1000} + var durationIncremented int64 + for _, ticksToTrigger := range testCasesTicksToTrigger { + for i := 0; i < ticksToTrigger; i++ { + var ok bool + ok = ChannelHasData(tickChan) + assert.False(t, ok) + initialIncrement := tickDuration / 2 + // Not enough to trigger tick + timer.IncrementDuration(time.Duration(initialIncrement)) + durationIncremented += initialIncrement + ok = ChannelHasData(tickChan) + assert.False(t, ok) + // tick triggered + timer.IncrementDuration(time.Duration(tickDuration)) + durationIncremented += tickDuration + val := TryToGetValue(tickChan) + assert.NotNil(t, + val, + fmt.Sprintf("Expected value passed thru the channel. 
Tick Duration: %v, Tick Trigger Iteration: %v, Ticket To Trigger: %v Current Clock Time: %v", + tickDuration, + i, + ticksToTrigger, + timer.currentTime)) + + // Adding 4th of the duration to trigger + durationForth := tickDuration / 4 + timer.IncrementDuration(time.Duration(durationForth)) + durationIncremented += durationForth + ok = ChannelHasData(tickChan) + assert.False(t, ok) + + // Leave the duration with exact divisor so that next loop can assume + // duration increment is zero + finalIncrement := tickDuration*2 - durationIncremented + // tick triggered + timer.IncrementDuration(time.Duration(finalIncrement)) + val = TryToGetValue(tickChan) + assert.NotNil(t, val) + durationIncremented = 0 + } + } + timer.Dispose() + + assert.EqualValues(t, 1, timer.killRoutinesCount) + assert.EqualValues(t, 1, timer.TickCalledTimes()) +} + +func TestTickDuration454(t *testing.T) { + var tickDuration int64 + tickDuration = 454 + TickTestHelper(tickDuration, t) +} + +func TestAfter(t *testing.T) { + var afterDuration int64 + afterDuration = 10 + timer := MockTimerClient{currentTime: 2153567564} + afterChan := make(chan EmptyStruct, 1) + tickFunc := func() { + // Go routine started + afterChan <- EmptyStruct{} + t := timer.After(time.Duration(afterDuration)) + for { + <-t + afterChan <- EmptyStruct{} + } + } + + go tickFunc() + + // Go routine started to monitor after messages + <-afterChan + var ok bool + ok = ChannelHasData(afterChan) + assert.False(t, ok) + initialIncrement := afterDuration / 2 + // Not enough to trigger after + timer.IncrementDuration(time.Duration(initialIncrement)) + ok = ChannelHasData(afterChan) + assert.False(t, ok) + // after triggered + timer.IncrementDuration(time.Duration(afterDuration)) + val := TryToGetValue(afterChan) + assert.NotNil(t, val, fmt.Sprintf("Expected value passed thru the channel. 
After Duration: %v, Current Clock Time: %v", afterDuration, timer.currentTime)) + + // After should trigger only once compared to tick + timer.IncrementDuration(time.Duration(afterDuration)) + ok = ChannelHasData(afterChan) + assert.False(t, ok) + + timer.Dispose() + + assert.EqualValues(t, 1, timer.killRoutinesCount) + assert.EqualValues(t, 1, timer.AfterCalledTimes()) +} + +func TestAfterTickTogether(t *testing.T) { + var tickDuration int64 + tickDuration = 10 + afterDuration := tickDuration * 2 + timer := MockTimerClient{currentTime: 23082153551} + tickChan := make(chan EmptyStruct, 1) + afterChan := make(chan EmptyStruct, 1) + tickFunc := func() { + // Go routine started + tick := timer.Tick(time.Duration(tickDuration)) + tickChan <- EmptyStruct{} + for { + select { + case <-tick: + tickChan <- EmptyStruct{} + } + } + } + afterFunc := func() { + // Go routine started + after := timer.After(time.Duration(afterDuration)) + afterChan <- EmptyStruct{} + for { + select { + case <-after: + afterChan <- EmptyStruct{} + + } + } + } + + go tickFunc() + go afterFunc() + + // Go routine started to monitor tick and after events + <-tickChan + <-afterChan + testCasesTicksToTrigger := []int{1, 2, 100} + var durationIncremented int64 + for triggerIndex, ticksToTrigger := range testCasesTicksToTrigger { + for i := 0; i < ticksToTrigger; i++ { + var ok bool + ok = ChannelHasData(tickChan) + assert.False(t, ok) + ok = ChannelHasData(afterChan) + assert.False(t, ok) + initialIncrement := tickDuration / 2 + // Not enough to trigger tick + timer.IncrementDuration(time.Duration(initialIncrement)) + durationIncremented += initialIncrement + ok = ChannelHasData(tickChan) + assert.False(t, ok) + ok = ChannelHasData(afterChan) + assert.False(t, ok) + // tick triggered + timer.IncrementDuration(time.Duration(tickDuration)) + durationIncremented += tickDuration + val := TryToGetValue(tickChan) + assert.NotNil(t, val) + ok = ChannelHasData(afterChan) + assert.False(t, ok) + + // Adding 4th of the duration to trigger + durationForth := tickDuration / 4 + timer.IncrementDuration(time.Duration(durationForth)) + durationIncremented += durationForth + ok = ChannelHasData(tickChan) + assert.False(t, ok) + ok = ChannelHasData(afterChan) + assert.False(t, ok) + + // Leave the duration with exact divisor so that next loop can assume + // duration increment is zero + finalIncrement := tickDuration*2 - durationIncremented + // tick triggered + timer.IncrementDuration(time.Duration(finalIncrement)) + // After will only trigger for first iteration as it only trigger once + if triggerIndex == 0 { + val = TryToGetValue(afterChan) + assert.NotNil(t, val) + } else { + ok = ChannelHasData(afterChan) + assert.False(t, ok) + } + val = TryToGetValue(tickChan) + assert.NotNil(t, val) + + durationIncremented = 0 + } + } + timer.Dispose() + + assert.EqualValues(t, 2, timer.killRoutinesCount) + assert.EqualValues(t, 1, timer.TickCalledTimes()) + assert.EqualValues(t, 1, timer.AfterCalledTimes()) +} diff --git a/daemon/util/timer/timer.go b/daemon/util/timer/timer.go new file mode 100644 index 0000000..e9b43bf --- /dev/null +++ b/daemon/util/timer/timer.go @@ -0,0 +1,22 @@ +package timer + +import "time" + +// Timer interface +type Timer interface { + Tick(d time.Duration) <-chan time.Time + After(d time.Duration) <-chan time.Time +} + +// Client is an empty timer client. +type Client struct{} + +// Tick is wrapper to time.Tick(). 
+func (t *Client) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// After is wrapper to time.After(). +func (t *Client) After(d time.Duration) <-chan time.Time { + return time.After(d) +} diff --git a/daemon/util/util.go b/daemon/util/util.go new file mode 100644 index 0000000..b9fd1d5 --- /dev/null +++ b/daemon/util/util.go @@ -0,0 +1,49 @@ +package util + +import ( + "bytes" + + log "github.com/cihub/seelog" +) + +// SplitHeaderBody separates header and body of buf using provided separator sep, and stores in returnByte. +func SplitHeaderBody(buf, sep *[]byte, returnByte *[][]byte) [][]byte { + if buf == nil { + log.Error("Buf to split passed nil") + return nil + } + if sep == nil { + log.Error("Separator used to split passed nil") + return nil + } + if returnByte == nil { + log.Error("Return Buf to be used to store split passed nil") + return nil + } + + separator := *sep + bufVal := *buf + lenSeparator := len(separator) + var header, body []byte + header = *buf + for i := 0; i < len(bufVal); i++ { + if bytes.Equal(bufVal[i:i+lenSeparator], separator) { + header = bufVal[0:i] + body = bufVal[i+lenSeparator:] + break + } + if i == len(bufVal)-1 { + log.Warnf("Missing header: %s", header) + } + } + returnByteVal := *returnByte + return append(returnByteVal[:0], header, body) +} + +// GetMinIntValue returns minimum between a and b. +func GetMinIntValue(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/daemon/util/util_test.go b/daemon/util/util_test.go new file mode 100644 index 0000000..d7088a1 --- /dev/null +++ b/daemon/util/util_test.go @@ -0,0 +1,83 @@ +package util + +import ( + "strings" + "testing" + "github.com/aws/aws-xray-daemon/daemon/util/test" + + "github.com/stretchr/testify/assert" +) + +func TestSplitHeaderBodyWithSeparatorExists(t *testing.T) { + str := "Header\nBody" + separator := "\n" + buf := []byte(str) + separatorArray := []byte(separator) + result := make([][]byte, 2) + + returnResult := SplitHeaderBody(&buf, &separatorArray, &result) + + assert.EqualValues(t, len(result), 2) + assert.EqualValues(t, string(result[0]), "Header") + assert.EqualValues(t, string(result[1]), "Body") + assert.EqualValues(t, string(returnResult[0]), "Header") + assert.EqualValues(t, string(returnResult[1]), "Body") + assert.EqualValues(t, string(buf), str) + assert.EqualValues(t, string(separatorArray), separator) +} + +func TestSplitHeaderBodyWithSeparatorDoesNotExist(t *testing.T) { + str := "Header" + separator := "\n" + buf := []byte(str) + separatorArray := []byte(separator) + result := make([][]byte, 2) + + returnResult := SplitHeaderBody(&buf, &separatorArray, &result) + + assert.EqualValues(t, len(result), 2) + assert.EqualValues(t, string(result[0]), "Header") + assert.EqualValues(t, string(result[1]), "") + assert.EqualValues(t, string(returnResult[0]), "Header") + assert.EqualValues(t, string(returnResult[1]), "") + assert.EqualValues(t, string(buf), str) + assert.EqualValues(t, string(separatorArray), separator) +} + +func TestSplitHeaderBodyNilBuf(t *testing.T) { + log := test.LogSetup() + separator := "\n" + separatorArray := []byte(separator) + result := make([][]byte, 2) + SplitHeaderBody(nil, &separatorArray, &result) + + assert.True(t, strings.Contains(log.Logs[0], "Buf to split passed nil")) +} + +func TestSplitHeaderBodyNilSeparator(t *testing.T) { + log := test.LogSetup() + str := "Test String" + buf := []byte(str) + result := make([][]byte, 2) + + SplitHeaderBody(&buf, nil, &result) + + assert.True(t, 
strings.Contains(log.Logs[0], "Separator used to split passed nil")) +} + +func TestSplitHeaderBodyNilResult(t *testing.T) { + log := test.LogSetup() + str := "Test String" + buf := []byte(str) + separator := "\n" + separatorArray := []byte(separator) + SplitHeaderBody(&buf, &separatorArray, nil) + + assert.True(t, strings.Contains(log.Logs[0], "Return Buf to be used to store split passed nil")) +} + +func TestGetMinIntValue(t *testing.T) { + assert.Equal(t, GetMinIntValue(1, 1), 1, "Return value should be 1") + assert.Equal(t, GetMinIntValue(0, 1), 0, "Return value should be 0") + assert.Equal(t, GetMinIntValue(1, 0), 0, "Return value should be 0") +} diff --git a/glide.lock b/glide.lock new file mode 100644 index 0000000..dc284c3 --- /dev/null +++ b/glide.lock @@ -0,0 +1,69 @@ +hash: 843bf4eb162ad5ee1e8111753a5bb5d157a1c93adfbf207838c13eaddd86413e +updated: 2017-03-08T16:33:42.338096339-08:00 +imports: +- name: github.com/aws/aws-sdk-go + version: fa1a4bc634fffa6ac468d8fb217e05475e063440 + subpackages: + - aws + - aws/awserr + - aws/credentials/stscreds + - aws/ec2metadata + - aws/request + - aws/session + - service/sts + - service/xray + - aws/awsutil + - aws/client + - aws/client/metadata + - aws/signer/v4 + - private/protocol/restjson + - aws/credentials + - aws/endpoints + - aws/corehandlers + - aws/defaults + - private/protocol/query + - private/protocol/rest + - private/protocol/jsonrpc + - aws/credentials/ec2rolecreds + - aws/credentials/endpointcreds + - private/protocol/query/queryutil + - private/protocol/xml/xmlutil + - private/protocol/json/jsonutil + - private/protocol +- name: github.com/cihub/seelog + version: f561c5e57575bb1e0a2167028b7339b3a8d16fb4 + subpackages: + - archive + - archive/gzip + - archive/tar + - archive/zip +- name: github.com/go-ini/ini + version: c437d20015c2ab6454b7a66a13109ff0fb99e17a +- name: github.com/golang/sys + version: 99f16d856c9836c42d24e7ab64ea72916925fa97 + subpackages: + - windows +- name: github.com/jmespath/go-jmespath + version: bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +- name: github.com/shirou/gopsutil + version: 23f4b7eb149c4b07835a23057d6d168d9301d373 + subpackages: + - mem + - internal/common +- name: github.com/stretchr/testify + version: 4d4bfba8f1d1027c4fdbe371823030df51419987 +- name: golang.org/x/net + version: a6577fac2d73be281a500b310739095313165611 + subpackages: + - http2 + - http2/hpack + - idna + - lex/httplex +- name: golang.org/x/sys + version: 99f16d856c9836c42d24e7ab64ea72916925fa97 + subpackages: + - windows/svc + - windows +- name: gopkg.in/yaml.v2 + version: a3f3340b5840cee44f372bddb5880fcbc419b46a +devImports: [] diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 0000000..0f713ac --- /dev/null +++ b/glide.yaml @@ -0,0 +1,12 @@ +package: . +import: +- package: github.com/aws/aws-sdk-go + version: 1.7.5 +- package: github.com/cihub/seelog +- package: github.com/stretchr/testify +- package: github.com/shirou/gopsutil +- package: gopkg.in/yaml.v2 +- package: github.com/golang/sys/windows + subpackage: + - svc + diff --git a/makefile b/makefile new file mode 100644 index 0000000..096e5a7 --- /dev/null +++ b/makefile @@ -0,0 +1,93 @@ +SDK_BASE_FOLDERS=$(shell ls -d daemon/ | grep -v vendor) +GO_VET_CMD=go tool vet --all -shadow +PREFIX :=. 
+ +# Initialize workspace if it's empty +ifeq ($(WORKSPACE),) + WORKSPACE := $(shell pwd)/../../../../ +endif + +# Initialize BGO_SPACE +export BGO_SPACE=$(shell pwd) +path := $(BGO_SPACE):$(WORKSPACE) +ifneq ($(GOPATH),) + GOPATH := $(path):$(GOPATH):$(BGO_SPACE) +else + GOPATH := $(path):$(BGO_SPACE) +endif + +export GOPATH + +build: pre-build create-folder copy-file build-mac build-linux build-windows zip-linux zip-osx zip-win + +packaging: package-rpm package-deb + +release: build test packaging clean-folder + +.PHONY: pre-build +pre-build: + go run $(BGO_SPACE)/Tool/src/versiongenerator/version-gen.go + +.PHONY: create-folder +create-folder: + mkdir -p build/xray + +.PHONY: copy-file +copy-file: + cp daemon/cfg.yaml build/xray/ + cp $(BGO_SPACE)/VERSION build/xray/ + +.PHONY: build-mac +build-mac: + @echo "Build for MAC amd64" + GOOS=darwin GOARCH=amd64 go build -ldflags "-s -w" -o $(BGO_SPACE)/build/xray/xray_mac ${PREFIX}/daemon/daemon.go ${PREFIX}/daemon/tracing.go + +.PHONY: build-linux +build-linux: + @echo "Build for Linux amd64" + GOOS=linux GOARCH=amd64 go build -ldflags "-s -w" -o $(BGO_SPACE)/build/xray/xray ${PREFIX}/daemon/daemon.go ${PREFIX}/daemon/tracing.go + +.PHONY: build-windows +build-windows: + @echo "Build for Windows amd64" + GOOS=windows GOARCH=amd64 go build -ldflags "-s -w" -o $(BGO_SPACE)/build/xray/xray.exe ${PREFIX}/daemon/daemon.go ${PREFIX}/daemon/tracing_windows.go + GOOS=windows GOARCH=amd64 go build -ldflags "-s -w" -o $(BGO_SPACE)/build/xray/xray_windows.exe ${PREFIX}/daemon/daemon.go ${PREFIX}/daemon/tracing.go + +.PHONY: zip-linux +zip-linux: + $(BGO_SPACE)/Tool/src/packaging/linux/build_zip_linux.sh + +.PHONY: zip-osx +zip-osx: + $(BGO_SPACE)/Tool/src/packaging/osx/build_zip_osx.sh + +.PHONY: zip-win +zip-win: + $(BGO_SPACE)/Tool/src/packaging/windows/build_zip_win.sh + +.PHONY: package-deb +package-deb: + $(BGO_SPACE)/Tool/src/packaging/debian/build_deb_linux.sh + +.PHONY: package-rpm +package-rpm: + -$(BGO_SPACE)/Tool/src/packaging/linux/build_rpm_linux.sh + +.PHONY: test +test: + @echo "Testing daemon" + go test -cover ${PREFIX}/daemon/ ./... + +vet: + ${GO_VET_CMD} ${SDK_BASE_FOLDERS} + +lint: + golint ${SDK_BASE_FOLDERS}... + +fmt: + go fmt daemon/... + +.PHONY: clean-folder +clean-folder: + cd build && \ + find . ! -name "xray" ! -name "." -type d -exec rm -rf {} +
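
For context on daemon/util above: SplitHeaderBody splits an incoming payload into a header part and a body part around the supplied separator. The standalone sketch below shows the intended call pattern only; the newline separator matches the unit tests above, and the sample payload is illustrative, not the daemon's actual wire format.

package main

import (
	"fmt"

	"github.com/aws/aws-xray-daemon/daemon/util"
)

func main() {
	// A payload whose header and body are separated by a newline, in the
	// shape exercised by TestSplitHeaderBodyWithSeparatorExists above.
	buf := []byte("{\"format\": \"json\", \"version\": 1}\n{\"trace_id\": \"example\"}")
	separator := []byte("\n")
	result := make([][]byte, 2)

	split := util.SplitHeaderBody(&buf, &separator, &result)

	fmt.Println(string(split[0])) // header JSON
	fmt.Println(string(split[1])) // body JSON
}

The returned slice holds the header at index 0 and the body at index 1; when the separator is absent the body is empty, as TestSplitHeaderBodyWithSeparatorDoesNotExist verifies.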